2009-10-13 05:58:37 +00:00
# -*- coding: utf-8 -*-
2008-10-14 13:23:55 +00:00
##############################################################################
2009-11-26 12:39:16 +00:00
#
2009-01-12 11:31:51 +00:00
# OpenERP, Open Source Management Solution
2010-01-12 09:18:39 +00:00
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
2008-10-14 13:23:55 +00:00
#
2008-11-03 19:18:56 +00:00
# This program is free software: you can redistribute it and/or modify
2009-10-14 11:15:34 +00:00
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
2008-09-26 14:23:55 +00:00
#
2008-11-03 19:18:56 +00:00
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2009-10-14 11:15:34 +00:00
# GNU Affero General Public License for more details.
2008-09-26 14:23:55 +00:00
#
2009-10-14 11:15:34 +00:00
# You should have received a copy of the GNU Affero General Public License
2009-11-26 12:39:16 +00:00
# along with this program. If not, see <http://www.gnu.org/licenses/>.
2008-09-26 14:23:55 +00:00
#
##############################################################################
import base64
from osv import osv , fields
import os
2010-08-10 12:29:57 +00:00
# from psycopg2 import Binary
2009-12-02 05:36:57 +00:00
#from tools import config
2008-12-16 12:35:15 +00:00
import tools
2009-03-06 22:18:24 +00:00
from tools . translate import _
2009-12-02 05:36:57 +00:00
import nodes
2010-10-12 10:40:15 +00:00
import logging
2008-09-26 14:23:55 +00:00
# Root of the document filestore on disk: the 'document_path' config
# option if set, otherwise <root_path>/filestore.
DMS_ROOT_PATH = tools.config.get('document_path',
                                 os.path.join(tools.config['root_path'], 'filestore'))
class document_file(osv.osv):
    """ir.attachment, extended by the document module with filesystem
    storage, directory placement and indexing."""
    _inherit = 'ir.attachment'
    # display records by their stored filename rather than 'name'
    _rec_name = 'datas_fname'
2010-12-23 09:51:09 +00:00
def attach_parent_id ( self , cr , uid , ids = [ ] , context = None ) :
""" Attach Parent id For document """
parent_id = self . pool . get ( ' document.directory ' ) . _get_root_directory ( cr , uid )
ids = self . search ( cr , uid , [ ( ' parent_id ' , ' = ' , False ) ] )
attach_doc = self . browse ( cr , uid , ids , context = context )
for attach in attach_doc :
cr . execute ( " UPDATE ir_attachment SET parent_id = %s ,db_datas = decode(encode( %s , ' escape ' ), ' base64 ' ) WHERE id = %s " , ( parent_id , attach . db_datas , attach . id ) )
return True
2009-01-22 23:52:23 +00:00
def _get_filestore ( self , cr ) :
2010-02-24 09:32:10 +00:00
return os . path . join ( DMS_ROOT_PATH , cr . dbname )
2009-01-22 23:52:23 +00:00
2010-11-19 13:48:01 +00:00
def _data_get ( self , cr , uid , ids , name , arg , context = None ) :
2010-11-23 07:05:05 +00:00
if context is None :
2010-11-19 13:48:01 +00:00
context = { }
2010-02-24 08:54:04 +00:00
fbrl = self . browse ( cr , uid , ids , context = context )
2010-12-23 09:51:09 +00:00
nctx = nodes . get_node_context ( cr , uid , context = context )
2010-07-13 10:30:05 +00:00
# nctx will /not/ inherit the caller's context. Most of
2010-10-12 06:26:08 +00:00
# it would be useless, anyway (like active_id, active_model,
2010-07-13 10:30:05 +00:00
# bin_size etc.)
2008-10-24 13:33:07 +00:00
result = { }
2009-12-02 05:36:57 +00:00
bin_size = context . get ( ' bin_size ' , False )
for fbro in fbrl :
2010-12-23 09:51:09 +00:00
fnode = nodes . node_file ( None , None , nctx , fbro )
2010-09-06 10:40:54 +00:00
if not bin_size :
2010-12-23 09:51:09 +00:00
data = fnode . get_data ( cr , fbro )
result [ fbro . id ] = base64 . encodestring ( data or ' ' )
2010-09-06 10:40:54 +00:00
else :
2010-12-23 09:51:09 +00:00
result [ fbro . id ] = fnode . get_data_len ( cr , fbro )
2008-10-24 13:33:07 +00:00
return result
#
# This code can be improved
#
2010-11-19 13:48:01 +00:00
def _data_set ( self , cr , uid , id , name , value , arg , context = None ) :
2008-10-24 13:33:07 +00:00
if not value :
return True
2010-02-24 08:54:04 +00:00
fbro = self . browse ( cr , uid , id , context = context )
2010-07-13 10:30:05 +00:00
nctx = nodes . get_node_context ( cr , uid , context = { } )
2010-02-24 08:54:04 +00:00
fnode = nodes . node_file ( None , None , nctx , fbro )
res = fnode . set_data ( cr , base64 . decodestring ( value ) , fbro )
2009-12-02 05:36:57 +00:00
return res
2008-10-24 13:33:07 +00:00
_columns = {
2010-06-27 20:18:09 +00:00
# Columns from ir.attachment:
' create_date ' : fields . datetime ( ' Date Created ' , readonly = True ) ,
' create_uid ' : fields . many2one ( ' res.users ' , ' Creator ' , readonly = True ) ,
' write_date ' : fields . datetime ( ' Date Modified ' , readonly = True ) ,
' write_uid ' : fields . many2one ( ' res.users ' , ' Last Modification User ' , readonly = True ) ,
2010-10-27 10:23:24 +00:00
' res_model ' : fields . char ( ' Attached Model ' , size = 64 , readonly = True , change_default = True ) ,
2010-06-29 14:00:40 +00:00
' res_id ' : fields . integer ( ' Attached ID ' , readonly = True ) ,
2010-06-27 20:18:09 +00:00
# If ir.attachment contained any data before document is installed, preserve
# the data, don't drop the column!
' db_datas ' : fields . binary ( ' Data ' , oldname = ' datas ' ) ,
' datas ' : fields . function ( _data_get , method = True , fnct_inv = _data_set , string = ' File Content ' , type = " binary " , nodrop = True ) ,
# Fields of document:
2008-10-24 13:33:07 +00:00
' user_id ' : fields . many2one ( ' res.users ' , ' Owner ' , select = 1 ) ,
2010-06-27 20:18:57 +00:00
# 'group_ids': fields.many2many('res.groups', 'document_group_rel', 'item_id', 'group_id', 'Groups'),
2009-12-02 05:36:57 +00:00
# the directory id now is mandatory. It can still be computed automatically.
2010-10-27 10:23:24 +00:00
' parent_id ' : fields . many2one ( ' document.directory ' , ' Directory ' , select = 1 , required = True , change_default = True ) ,
2008-10-24 13:33:07 +00:00
' index_content ' : fields . text ( ' Indexed Content ' ) ,
' partner_id ' : fields . many2one ( ' res.partner ' , ' Partner ' , select = 1 ) ,
2010-06-27 20:18:09 +00:00
' file_size ' : fields . integer ( ' File Size ' , required = True ) ,
' file_type ' : fields . char ( ' Content Type ' , size = 128 ) ,
2010-10-12 06:26:08 +00:00
2010-06-27 20:18:09 +00:00
# fields used for file storage
' store_fname ' : fields . char ( ' Stored Filename ' , size = 200 ) ,
2008-10-24 13:33:07 +00:00
}
2010-10-12 06:26:08 +00:00
_order = " create_date desc "
2008-10-24 13:33:07 +00:00
2010-02-24 08:54:04 +00:00
def __get_def_directory ( self , cr , uid , context = None ) :
2009-12-02 05:36:57 +00:00
dirobj = self . pool . get ( ' document.directory ' )
2010-02-24 08:54:04 +00:00
return dirobj . _get_root_directory ( cr , uid , context )
2009-12-02 05:36:57 +00:00
2008-10-24 13:33:07 +00:00
_defaults = {
2010-02-24 08:54:04 +00:00
' user_id ' : lambda self , cr , uid , ctx : uid ,
' file_size ' : lambda self , cr , uid , ctx : 0 ,
2009-12-02 05:36:57 +00:00
' parent_id ' : __get_def_directory
2008-10-24 13:33:07 +00:00
}
_sql_constraints = [
2010-10-12 10:40:15 +00:00
# filename_uniq is not possible in pure SQL
2009-01-12 11:31:51 +00:00
]
2010-02-24 08:54:04 +00:00
def _check_duplication ( self , cr , uid , vals , ids = [ ] , op = ' create ' ) :
name = vals . get ( ' name ' , False )
parent_id = vals . get ( ' parent_id ' , False )
res_model = vals . get ( ' res_model ' , False )
res_id = vals . get ( ' res_id ' , 0 )
if op == ' write ' :
2010-07-01 17:51:32 +00:00
for file in self . browse ( cr , uid , ids ) : # FIXME fields_only
2008-11-18 13:04:58 +00:00
if not name :
2010-02-24 08:54:04 +00:00
name = file . name
2008-11-18 13:04:58 +00:00
if not parent_id :
2010-02-24 08:54:04 +00:00
parent_id = file . parent_id and file . parent_id . id or False
2008-11-18 13:04:58 +00:00
if not res_model :
2010-02-24 08:54:04 +00:00
res_model = file . res_model and file . res_model or False
2008-11-18 13:04:58 +00:00
if not res_id :
2010-02-24 08:54:04 +00:00
res_id = file . res_id and file . res_id or 0
res = self . search ( cr , uid , [ ( ' id ' , ' <> ' , file . id ) , ( ' name ' , ' = ' , name ) , ( ' parent_id ' , ' = ' , parent_id ) , ( ' res_model ' , ' = ' , res_model ) , ( ' res_id ' , ' = ' , res_id ) ] )
2008-11-18 13:04:58 +00:00
if len ( res ) :
2009-03-03 09:51:57 +00:00
return False
2010-02-24 08:54:04 +00:00
if op == ' create ' :
res = self . search ( cr , uid , [ ( ' name ' , ' = ' , name ) , ( ' parent_id ' , ' = ' , parent_id ) , ( ' res_id ' , ' = ' , res_id ) , ( ' res_model ' , ' = ' , res_model ) ] )
2008-10-24 13:33:07 +00:00
if len ( res ) :
return False
return True
2010-01-27 06:57:08 +00:00
2008-11-18 13:04:58 +00:00
def copy ( self , cr , uid , id , default = None , context = None ) :
if not default :
2010-02-24 08:54:04 +00:00
default = { }
2010-01-27 06:57:08 +00:00
if ' name ' not in default :
name = self . read ( cr , uid , [ id ] ) [ 0 ] [ ' name ' ]
2010-02-24 08:54:04 +00:00
default . update ( { ' name ' : name + " (copy) " } )
2010-11-19 13:48:01 +00:00
return super ( document_file , self ) . copy ( cr , uid , id , default , context = context )
2010-01-27 06:57:08 +00:00
2009-01-12 11:31:51 +00:00
def write ( self , cr , uid , ids , vals , context = None ) :
2010-09-06 10:05:50 +00:00
result = False
2010-03-15 07:27:07 +00:00
if not isinstance ( ids , list ) :
ids = [ ids ]
2010-02-24 08:54:04 +00:00
res = self . search ( cr , uid , [ ( ' id ' , ' in ' , ids ) ] )
2008-12-30 13:56:38 +00:00
if not len ( res ) :
return False
2010-02-24 08:54:04 +00:00
if not self . _check_duplication ( cr , uid , vals , ids , ' write ' ) :
2010-01-25 09:13:19 +00:00
raise osv . except_osv ( _ ( ' ValidateError ' ) , _ ( ' File name must be unique! ' ) )
2010-10-12 06:26:08 +00:00
2010-07-13 20:53:59 +00:00
# if nodes call this write(), they must skip the code below
from_node = context and context . get ( ' __from_node ' , False )
if ( ( ' parent_id ' in vals ) or ( ' name ' in vals ) ) and not from_node :
2010-07-09 08:23:16 +00:00
# perhaps this file is renaming or changing directory
2010-07-13 10:30:05 +00:00
nctx = nodes . get_node_context ( cr , uid , context = { } )
2010-07-01 17:51:31 +00:00
dirobj = self . pool . get ( ' document.directory ' )
2010-07-09 08:23:16 +00:00
if ' parent_id ' in vals :
dbro = dirobj . browse ( cr , uid , vals [ ' parent_id ' ] , context = context )
dnode = nctx . get_dir_node ( cr , dbro )
else :
dbro = None
dnode = None
2010-07-01 17:51:31 +00:00
ids2 = [ ]
for fbro in self . browse ( cr , uid , ids , context = context ) :
2010-07-09 08:23:16 +00:00
if ( ' parent_id ' not in vals or fbro . parent_id . id == vals [ ' parent_id ' ] ) \
2010-12-23 09:51:09 +00:00
and ( ' name ' not in vals or fbro . name == vals [ ' name ' ] ) or not fbro . parent_id :
2010-07-01 17:51:31 +00:00
ids2 . append ( fbro . id )
2010-07-09 08:23:16 +00:00
continue
fnode = nctx . get_file_node ( cr , fbro )
res = fnode . move_to ( cr , dnode or fnode . parent , vals . get ( ' name ' , fbro . name ) , fbro , dbro , True )
if isinstance ( res , dict ) :
vals2 = vals . copy ( )
vals2 . update ( res )
wid = res . get ( ' id ' , fbro . id )
result = super ( document_file , self ) . write ( cr , uid , wid , vals2 , context = context )
# TODO: how to handle/merge several results?
elif res == True :
ids2 . append ( fbro . id )
elif res == False :
pass
2010-07-01 17:51:31 +00:00
ids = ids2
2010-07-01 17:51:33 +00:00
if ' file_size ' in vals : # only write that field using direct SQL calls
del vals [ ' file_size ' ]
2010-07-09 08:23:16 +00:00
if len ( ids ) and len ( vals ) :
2010-07-01 17:51:31 +00:00
result = super ( document_file , self ) . write ( cr , uid , ids , vals , context = context )
2010-07-01 17:51:33 +00:00
cr . commit ( ) # ?
2008-10-24 13:33:07 +00:00
return result
2009-12-02 05:36:57 +00:00
def create ( self , cr , uid , vals , context = None ) :
2010-09-06 10:40:54 +00:00
if context is None :
2009-12-02 05:36:57 +00:00
context = { }
2010-02-24 08:54:04 +00:00
vals [ ' parent_id ' ] = context . get ( ' parent_id ' , False ) or vals . get ( ' parent_id ' , False )
2010-03-18 15:30:43 +00:00
if not vals [ ' parent_id ' ] :
vals [ ' parent_id ' ] = self . pool . get ( ' document.directory ' ) . _get_root_directory ( cr , uid , context )
2010-02-24 08:54:04 +00:00
if not vals . get ( ' res_id ' , False ) and context . get ( ' default_res_id ' , False ) :
vals [ ' res_id ' ] = context . get ( ' default_res_id ' , False )
if not vals . get ( ' res_model ' , False ) and context . get ( ' default_res_model ' , False ) :
vals [ ' res_model ' ] = context . get ( ' default_res_model ' , False )
2010-07-01 17:51:33 +00:00
if vals . get ( ' res_id ' , False ) and vals . get ( ' res_model ' , False ) \
and not vals . get ( ' partner_id ' , False ) :
2010-07-06 12:10:45 +00:00
vals [ ' partner_id ' ] = self . __get_partner_id ( cr , uid , \
2010-07-01 17:51:33 +00:00
vals [ ' res_model ' ] , vals [ ' res_id ' ] , context )
2008-10-24 13:33:07 +00:00
2010-02-24 08:54:04 +00:00
datas = None
if vals . get ( ' link ' , False ) :
2008-10-24 13:33:07 +00:00
import urllib
2010-02-24 08:54:04 +00:00
datas = base64 . encodestring ( urllib . urlopen ( vals [ ' link ' ] ) . read ( ) )
2008-10-24 13:33:07 +00:00
else :
2010-02-24 08:54:04 +00:00
datas = vals . get ( ' datas ' , False )
2009-11-26 12:39:16 +00:00
2010-07-01 17:51:33 +00:00
if datas :
vals [ ' file_size ' ] = len ( datas )
else :
if vals . get ( ' file_size ' ) :
del vals [ ' file_size ' ]
2010-02-24 08:54:04 +00:00
if not self . _check_duplication ( cr , uid , vals ) :
2010-01-25 09:13:19 +00:00
raise osv . except_osv ( _ ( ' ValidateError ' ) , _ ( ' File name must be unique! ' ) )
2010-02-24 08:54:04 +00:00
result = super ( document_file , self ) . create ( cr , uid , vals , context )
2010-07-01 17:51:33 +00:00
cr . commit ( ) # ?
2008-10-24 13:33:07 +00:00
return result
2010-11-19 13:48:01 +00:00
def __get_partner_id ( self , cr , uid , res_model , res_id , context = None ) :
2010-07-01 17:51:33 +00:00
""" A helper to retrieve the associated partner from any res_model+id
It is a hack that will try to discover if the mentioned record is
clearly associated with a partner record .
"""
2010-07-06 12:10:45 +00:00
obj_model = self . pool . get ( res_model )
if obj_model . _name == ' res.partner ' :
return res_id
elif ' partner_id ' in obj_model . _columns and obj_model . _columns [ ' partner_id ' ] . _obj == ' res.partner ' :
2010-07-16 22:18:34 +00:00
bro = obj_model . browse ( cr , uid , res_id , context = context )
2010-07-06 12:10:45 +00:00
return bro . partner_id . id
elif ' address_id ' in obj_model . _columns and obj_model . _columns [ ' address_id ' ] . _obj == ' res.partner.address ' :
2010-07-16 22:18:34 +00:00
bro = obj_model . browse ( cr , uid , res_id , context = context )
2010-07-06 12:10:45 +00:00
return bro . address_id . partner_id . id
2010-07-01 17:51:33 +00:00
return False
2010-08-27 07:39:55 +00:00
def unlink ( self , cr , uid , ids , context = None ) :
2009-12-02 05:36:57 +00:00
stor = self . pool . get ( ' document.storage ' )
2010-02-24 08:54:04 +00:00
unres = [ ]
2009-12-02 05:36:57 +00:00
# We have to do the unlink in 2 stages: prepare a list of actual
# files to be unlinked, update the db (safer to do first, can be
# rolled back) and then unlink the files. The list wouldn't exist
# after we discard the objects
2010-11-11 13:30:06 +00:00
ids = self . search ( cr , uid , [ ( ' id ' , ' in ' , ids ) ] )
2010-11-19 13:48:01 +00:00
for f in self . browse ( cr , uid , ids , context = context ) :
2009-12-02 05:36:57 +00:00
# TODO: update the node cache
2010-08-27 07:39:55 +00:00
par = f . parent_id
storage_id = None
while par :
if par . storage_id :
storage_id = par . storage_id
break
par = par . parent_id
2010-10-05 10:25:18 +00:00
#assert storage_id, "Strange, found file #%s w/o storage!" % f.id #TOCHECK: after run yml, it's fail
if storage_id :
r = stor . prepare_unlink ( cr , uid , storage_id , f )
if r :
unres . append ( r )
2010-10-12 10:40:15 +00:00
else :
logging . getLogger ( ' document ' ) . warning ( " Unlinking attachment # %s %s that has no storage " ,
f . id , f . name )
2009-12-02 05:36:57 +00:00
res = super ( document_file , self ) . unlink ( cr , uid , ids , context )
2010-02-24 08:54:04 +00:00
stor . do_unlink ( cr , uid , unres )
2009-12-02 05:36:57 +00:00
return res
# instantiate once to register the model in the ORM pool (pre-v6 convention)
document_file()