Merge branch 'master' of openobject-server into mdv-gpl3-py26

Conflicts:
	bin/addons/base/ir/ir.xml
	bin/osv/orm.py
	bin/sql_db.py
	bin/tools/config.py

bzr revid: p_christ@hol.gr-20090122175910-4apjfo1p7iokatm6
P. Christeas 2009-01-22 19:59:10 +02:00
commit 1f6278b32c
63 changed files with 52123 additions and 42000 deletions


@ -1,3 +1,3 @@
./bin/addons/*
./bin/filestore*


@ -56,6 +56,7 @@ if ad != _ad:
# Modules already loaded
loaded = []
class Graph(dict):
def addNode(self, name, deps):
@ -79,6 +80,7 @@ class Graph(dict):
yield module
level += 1
class Singleton(object):
def __new__(cls, name, graph):
@ -90,6 +92,7 @@ class Singleton(object):
graph[name] = inst
return inst
class Node(Singleton):
def __init__(self, name, graph):
@ -107,7 +110,7 @@ class Node(Singleton):
for attr in ('init', 'update', 'demo'):
if hasattr(self, attr):
setattr(node, attr, True)
self.childs.sort(lambda x,y: cmp(x.name, y.name))
self.childs.sort(lambda x, y: cmp(x.name, y.name))
def hasChild(self, name):
return Node(name, self.graph) in self.childs or \
@ -135,9 +138,9 @@ class Node(Singleton):
s += '%s`-> %s' % (' ' * depth, c._pprint(depth+1))
return s
def get_module_path(module):
"""Return the path of the given module.
"""
"""Return the path of the given module."""
if os.path.exists(opj(ad, module)) or os.path.exists(opj(ad, '%s.zip' % module)):
return opj(ad, module)
@ -148,14 +151,16 @@ def get_module_path(module):
logger.notifyChannel('init', netsvc.LOG_WARNING, 'module %s: module not found' % (module,))
return False
def get_module_filetree(module, dir='.'):
path = get_module_path(module)
if not path:
return False
dir = os.path.normpath(dir)
if dir == '.': dir = ''
if dir.startswith('..') or dir[0] == '/':
if dir == '.':
dir = ''
if dir.startswith('..') or (dir and dir[0] == '/'):
raise Exception('Cannot access file outside the module')
if not os.path.isdir(path):
@ -164,51 +169,77 @@ def get_module_filetree(module, dir='.'):
files = ['/'.join(f.split('/')[1:]) for f in zip.namelist()]
else:
files = tools.osutil.listdir(path, True)
tree = {}
for f in files:
if not f.startswith(dir):
continue
f = f[len(dir)+int(not dir.endswith('/')):]
if dir:
f = f[len(dir)+int(not dir.endswith('/')):]
lst = f.split(os.sep)
current = tree
while len(lst) != 1:
current = current.setdefault(lst.pop(0), {})
current[lst.pop(0)] = None
return tree
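
For reference, a sketch of the nested mapping get_module_filetree returns, assuming a hypothetical module containing a.py and sub/b.py (directories become dicts, files become None leaves):

    # hypothetical: get_module_filetree('mymodule')
    # => {'a.py': None, 'sub': {'b.py': None}}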
def get_module_as_zip_from_module_directory(module_directory, b64enc=True, src=True):
"""Compress a module directory
@param module_directory: The module directory
@param b64enc: if True, the function will encode the zip file with base64
@param src: Integrate the source files
@return: a stream to store in a file-like object
"""
def get_module_as_zip(modulename, b64enc=True, src=True):
RE_exclude = re.compile('(?:^\..+\.swp$)|(?:\.py[oc]$)|(?:\.bak$)|(?:\.~.~$)', re.I)
def _zippy(archive, path, src=True):
path = os.path.abspath(path)
base = os.path.basename(path)
for f in tools.osutil.listdir(path, True):
bf = os.path.basename(f)
if not RE_exclude.search(bf) and (src or bf == '__terp__.py' or not path.endswith('.py')):
if not RE_exclude.search(bf) and (src or bf == '__terp__.py' or not bf.endswith('.py')):
archive.write(os.path.join(path, f), os.path.join(base, f))
ap = get_module_path(str(modulename))
if not ap:
raise Exception('Unable to find path for module %s' % modulename)
ap = ap.encode('utf8')
if os.path.isfile(ap + '.zip'):
val = file(ap + '.zip', 'rb').read()
else:
archname = StringIO()
archive = PyZipFile(archname, "w", ZIP_DEFLATED)
archive.writepy(ap)
_zippy(archive, ap, src=src)
archive.close()
val = archname.getvalue()
archname.close()
archname = StringIO()
archive = PyZipFile(archname, "w", ZIP_DEFLATED)
archive.writepy(module_directory)
_zippy(archive, module_directory, src=src)
archive.close()
val = archname.getvalue()
archname.close()
if b64enc:
val = base64.encodestring(val)
return val
def get_module_as_zip(modulename, b64enc=True, src=True):
"""Generate a module as zip file with the source or not and can do a base64 encoding
@param modulename: The module name
@param b64enc: if True the function will encode the zip file with base64
@param src: Integrate the source files
@return: a stream to store in a file-like object
"""
ap = get_module_path(str(modulename))
if not ap:
raise Exception('Unable to find path for module %s' % modulename)
ap = ap.encode('utf8')
if os.path.isfile(ap + '.zip'):
val = file(ap + '.zip', 'rb').read()
if b64enc:
val = base64.encodestring(val)
else:
val = get_module_as_zip_from_module_directory(ap, b64enc, src)
return val
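
A minimal usage sketch for the two helpers above, with a hypothetical module name; base64.decodestring matches the Python 2 API used in the code:

    import base64

    data = get_module_as_zip('base', b64enc=True, src=True)
    open('base.zip', 'wb').write(base64.decodestring(data))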
@ -223,6 +254,7 @@ def get_module_resource(module, *args):
a = get_module_path(module)
return a and opj(a, *args) or False
def get_modules():
"""Returns the list of module names
"""
@ -240,9 +272,10 @@ def get_modules():
return list(set(listdir(ad) + listdir(_ad)))
def create_graph(module_list, force=None):
if not force:
force=[]
force = []
graph = Graph()
packages = []
@ -254,7 +287,8 @@ def create_graph(module_list, force=None):
except IOError:
continue
terp_file = get_module_resource(module, '__terp__.py')
if not terp_file: continue
if not terp_file:
continue
if os.path.isfile(terp_file) or zipfile.is_zipfile(mod_path+'.zip'):
try:
info = eval(tools.file_open(terp_file).read())
@ -263,14 +297,14 @@ def create_graph(module_list, force=None):
raise
if info.get('installable', True):
packages.append((module, info.get('depends', []), info))
dependencies = dict([(p, deps) for p, deps, data in packages])
current, later = set([p for p, dep, data in packages]), set()
while packages and current > later:
package, deps, data = packages[0]
# if all dependencies of 'package' are already in the graph, add 'package' in the graph
if reduce(lambda x,y: x and y in graph, deps, True):
if reduce(lambda x, y: x and y in graph, deps, True):
if not package in current:
packages.pop(0)
continue
@ -286,15 +320,15 @@ def create_graph(module_list, force=None):
later.add(package)
packages.append((package, deps, data))
packages.pop(0)
for package in later:
unmet_deps = filter(lambda p: p not in graph, dependencies[package])
logger.notifyChannel('init', netsvc.LOG_ERROR, 'module %s: Unmet dependencies: %s' % (package, ', '.join(unmet_deps)))
return graph
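
For reference, the reduce(...) dependency test in create_graph above is equivalent to the clearer builtin form (same semantics, Python 2.5+):

    # True when every dependency of 'package' is already in the graph
    deps_met = all(dep in graph for dep in deps)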
def init_module_objects(cr, module_name, obj_list):
pool = pooler.get_pool(cr.dbname)
logger.notifyChannel('init', netsvc.LOG_INFO, 'module %s: creating or updating database tables' % module_name)
todo = []
for obj in obj_list:
@ -309,6 +343,7 @@ def init_module_objects(cr, module_name, obj_list):
t[1](cr, *t[2])
cr.commit()
def register_class(m):
"""
Register module named m, if not already registered
@ -316,7 +351,7 @@ def register_class(m):
def log(e):
mt = isinstance(e, zipimport.ZipImportError) and 'zip ' or ''
msg = "Couldn't load%s module %s" % (mt, m)
msg = "Couldn't load %smodule %s" % (mt, m)
logger.notifyChannel('init', netsvc.LOG_CRITICAL, msg)
logger.notifyChannel('init', netsvc.LOG_CRITICAL, e)
@ -325,6 +360,7 @@ def register_class(m):
return
logger.notifyChannel('init', netsvc.LOG_INFO, 'module %s: registering objects' % m)
mod_path = get_module_path(m)
try:
zip_mod_path = mod_path + '.zip'
if not os.path.isfile(zip_mod_path):
@ -349,8 +385,8 @@ class MigrationManager(object):
This class manages the migration of modules.
Migration files must be python files containing a "migrate(cr, installed_version)" function.
These files must respect a directory tree structure: a 'migrations' folder which contains a
folder per version. Version can be a 'module' version or a 'server.module' version (in this case,
the files will only be processed by this version of the server). Python file names must start
with 'pre' or 'post' and will be executed, respectively, before and after the module initialisation.
Example:
@ -366,7 +402,7 @@ class MigrationManager(object):
| `-- post-clean-data.py
`-- foo.py # not processed
A similar structure is generated by the maintenance module with the migration files fetched through
the maintenance contract
"""
@ -391,7 +427,6 @@ class MigrationManager(object):
self.migrations[pkg.name]['module'] = get_module_filetree(pkg.name, 'migrations') or {}
self.migrations[pkg.name]['maintenance'] = get_module_filetree('base', 'maintenance/migrations/' + pkg.name) or {}
def migrate_module(self, pkg, stage):
assert stage in ('pre', 'post')
stageformat = {'pre': '[>%s]',
@ -400,7 +435,7 @@ class MigrationManager(object):
if not (hasattr(pkg, 'update') or pkg.state == 'to upgrade'):
return
def convert_version(version):
if version.startswith(release.major_version) and version != release.major_version:
return version # the version number already contains the server version
@ -411,7 +446,7 @@ class MigrationManager(object):
return [d for d in tree if tree[d] is not None]
versions = list(set(
__get_dir(self.migrations[pkg.name]['module']) +
__get_dir(self.migrations[pkg.name]['maintenance'])
))
versions.sort(key=lambda k: parse_version(convert_version(k)))
@ -422,9 +457,9 @@ class MigrationManager(object):
"""
m = self.migrations[pkg.name]
lst = []
mapping = {'module': {'module': pkg.name, 'rootdir': opj('migrations')},
'maintenance': {'module': 'base', 'rootdir': opj('maintenance', 'migrations', pkg.name)},
mapping = {'module': opj(pkg.name, 'migrations'),
'maintenance': opj('base', 'maintenance', 'migrations', pkg.name),
}
for x in mapping.keys():
@ -434,10 +469,11 @@ class MigrationManager(object):
continue
if not f.startswith(stage + '-'):
continue
lst.append((mapping[x]['module'], opj(mapping[x]['rootdir'], version, f)))
lst.append(opj(mapping[x], version, f))
lst.sort()
return lst
def mergedict(a,b):
def mergedict(a, b):
a = a.copy()
a.update(b)
return a
@ -456,26 +492,26 @@ class MigrationManager(object):
'stage': stage,
'version': stageformat[stage] % version,
}
for modulename, pyfile in _get_migration_files(pkg, version, stage):
for pyfile in _get_migration_files(pkg, version, stage):
name, ext = os.path.splitext(os.path.basename(pyfile))
if ext.lower() != '.py':
continue
mod = fp = fp2 = None
try:
fp = tools.file_open(opj(modulename, pyfile))
# imp.load_source need a real file object, so we create
# on from the file-like object we get from file_open
fp = tools.file_open(pyfile)
# imp.load_source need a real file object, so we create
# one from the file-like object we get from file_open
fp2 = os.tmpfile()
fp2.write(fp.read())
fp2.seek(0)
try:
mod = imp.load_source(name, pyfile, fp2)
logger.notifyChannel('migration', netsvc.LOG_INFO, 'module %(addon)s: Running migration %(version)s %(name)s"' % mergedict({'name': mod.__name__},strfmt))
logger.notifyChannel('migration', netsvc.LOG_INFO, 'module %(addon)s: Running migration %(version)s %(name)s' % mergedict({'name': mod.__name__}, strfmt))
mod.migrate(self.cr, pkg.installed_version)
except ImportError:
logger.notifyChannel('migration', netsvc.LOG_ERROR, 'module %(addon)s: Unable to load %(stage)s-migration file %(file)s' % mergedict({'file': opj(modulename,pyfile)}, strfmt))
logger.notifyChannel('migration', netsvc.LOG_ERROR, 'module %(addon)s: Unable to load %(stage)s-migration file %(file)s' % mergedict({'file': pyfile}, strfmt))
raise
except AttributeError:
logger.notifyChannel('migration', netsvc.LOG_ERROR, 'module %(addon)s: Each %(stage)s-migration file must have a "migrate(cr, installed_version)" function' % strfmt)
@ -488,12 +524,12 @@ class MigrationManager(object):
fp2.close()
if mod:
del mod
def load_module_graph(cr, graph, status=None, perform_checks=True, **kwargs):
# **kwargs is passed directly to convert_xml_import
if not status:
status={}
status = {}
status = status.copy()
package_todo = []
@ -517,21 +553,21 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, **kwargs):
for package in graph:
for k, v in additional_data[package.name].items():
setattr(package, k, v)
migrations = MigrationManager(cr, graph)
check_rules = False
modobj = None
for package in graph:
status['progress'] = (float(statusi)+0.1)/len(graph)
status['progress'] = (float(statusi)+0.1) / len(graph)
m = package.name
mid = package.id
migrations.migrate_module(package, 'pre')
register_class(m)
logger.notifyChannel('init', netsvc.LOG_INFO, 'module %s loading objects' % m)
logger.notifyChannel('init', netsvc.LOG_INFO, 'module %s: loading objects' % m)
modules = pool.instanciate(m, cr)
if modobj is None:
@ -541,14 +577,14 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, **kwargs):
modobj.check(cr, 1, [mid])
idref = {}
status['progress'] = (float(statusi)+0.4)/len(graph)
status['progress'] = (float(statusi)+0.4) / len(graph)
if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
check_rules = True
init_module_objects(cr, m, modules)
for kind in ('init', 'update'):
for filename in package.data.get('%s_xml' % kind, []):
mode = 'update'
if hasattr(package, 'init') or package.state=='to install':
if hasattr(package, 'init') or package.state == 'to install':
mode = 'init'
logger.notifyChannel('init', netsvc.LOG_INFO, 'module %s: loading %s' % (m, filename))
name, ext = os.path.splitext(filename)
@ -565,38 +601,35 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, **kwargs):
tools.convert_xml_import(cr, m, fp, idref, mode=mode, **kwargs)
fp.close()
if hasattr(package, 'demo') or (package.dbdemo and package.state != 'installed'):
status['progress'] = (float(statusi)+0.75)/len(graph)
status['progress'] = (float(statusi)+0.75) / len(graph)
for xml in package.data.get('demo_xml', []):
name, ext = os.path.splitext(xml)
logger.notifyChannel('init', netsvc.LOG_INFO, 'module %s: loading %s' % (m, xml))
fp = tools.file_open(opj(m, xml))
if ext == '.csv':
tools.convert_csv_import(cr, m, os.path.basename(xml), fp.read(), idref, noupdate=True)
tools.convert_csv_import(cr, m, os.path.basename(xml), fp.read(), idref, mode=mode, noupdate=True)
else:
tools.convert_xml_import(cr, m, fp, idref, noupdate=True, **kwargs)
tools.convert_xml_import(cr, m, fp, idref, mode=mode, noupdate=True, **kwargs)
fp.close()
cr.execute('update ir_module_module set demo=%s where id=%s', (True, mid))
package_todo.append(package.name)
ver = release.major_version + '.' + package.data.get('version', '1.0')
# update the installed version in database...
#cr.execute("update ir_module_module set state='installed', latest_version=%s where id=%s", (ver, mid,))
# Set new modules and dependencies
modobj.write(cr, 1, [mid], {'state':'installed', 'latest_version':ver})
cr.commit()
# Update translations for all installed languages
if modobj:
ver = release.major_version + '.' + package.data.get('version', '1.0')
# Set new modules and dependencies
modobj.write(cr, 1, [mid], {'state': 'installed', 'latest_version': ver})
# Update translations for all installed languages
modobj.update_translations(cr, 1, [mid], None)
cr.commit()
migrations.migrate_module(package, 'post')
statusi+=1
statusi += 1
if perform_checks and check_rules:
cr.execute("""select model,name from ir_model where id not in (select model_id from ir_model_access)""")
for (model,name) in cr.fetchall():
logger.notifyChannel('init', netsvc.LOG_WARNING, 'object %s (%s) has no access rules!' % (model,name))
for (model, name) in cr.fetchall():
logger.notifyChannel('init', netsvc.LOG_WARNING, 'object %s (%s) has no access rules!' % (model, name))
cr.execute('select model from ir_model where state=%s', ('manual',))
@ -606,9 +639,10 @@ def load_module_graph(cr, graph, status=None, perform_checks=True, **kwargs):
pool.get('ir.model.data')._process_end(cr, 1, package_todo)
cr.commit()
def load_modules(db, force_demo=False, status=None, update_module=False):
if not status:
status={}
status = {}
cr = db.cursor()
force = []
@ -630,20 +664,20 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
ids = modobj.search(cr, 1, ['&', ('state', '=', 'uninstalled'), ('name', 'in', mods)])
if ids:
modobj.button_install(cr, 1, ids)
mods = [k for k in tools.config['update'] if tools.config['update'][k]]
if mods:
ids = modobj.search(cr, 1, ['&',('state', '=', 'installed'), ('name', 'in', mods)])
ids = modobj.search(cr, 1, ['&', ('state', '=', 'installed'), ('name', 'in', mods)])
if ids:
modobj.button_upgrade(cr, 1, ids)
cr.execute("update ir_module_module set state=%s where name=%s", ('installed', 'base'))
cr.execute("select name from ir_module_module where state in ('installed', 'to install', 'to upgrade')")
else:
cr.execute("select name from ir_module_module where state in ('installed', 'to upgrade')")
module_list = [name for (name,) in cr.fetchall()]
graph = create_graph(module_list, force)
# the 'base' module has already been updated
base = graph['base']
base.state = 'installed'
@ -656,7 +690,7 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
logger.notifyChannel('init', netsvc.LOG_INFO, report)
for kind in ('init', 'demo', 'update'):
tools.config[kind]={}
tools.config[kind] = {}
cr.commit()
if update_module:
@ -664,7 +698,7 @@ def load_modules(db, force_demo=False, status=None, update_module=False):
for mod_id, mod_name in cr.fetchall():
pool = pooler.get_pool(cr.dbname)
cr.execute('select model,res_id from ir_model_data where noupdate=%s and module=%s order by id desc', (False, mod_name,))
for rmod,rid in cr.fetchall():
for rmod, rid in cr.fetchall():
uid = 1
pool.get(rmod).unlink(cr, uid, [rid])
cr.execute('delete from ir_model_data where noupdate=%s and module=%s', (False, mod_name,))


@ -76,12 +76,12 @@
<field eval="18" name="priority"/>
<field name="arch" type="xml">
<form string="Users">
<field name="password" password="True"/>
<field name="password" password="True" readonly="0"/>
<label colspan="4" string="Please note that you will have to logout and relog if you change your password."/>
<field name="context_lang" completion="1"/>
<field name="context_tz" completion="1"/>
<field name="context_lang" completion="1" readonly="0"/>
<field name="context_tz" completion="1" readonly="0"/>
<newline/>
<field colspan="4" name="signature"/>
<field colspan="4" name="signature" readonly="0"/>
</form>
</field>
</record>

28 file diffs suppressed because they are too large.


@ -261,7 +261,8 @@
<form string="Report xml">
<field name="name" select="1"/>
<field name="type" select="1"/>
<field name="model" select="1" />
<field name="model" select="1"/>
<newline/>
<field name="report_name" select="1"/>
<field name="report_xsl"/>
<field name="report_xml"/>
@ -271,6 +272,7 @@
<field name="header"/>
<field name="report_type"/>
<field name="attachment"/>
<field name="attachment_use"/>
<field colspan="4" name="groups_id"/>
</form>
</field>
@ -902,7 +904,7 @@
<field name="model">ir.translation</field>
<field name="type">tree</field>
<field name="arch" type="xml">
<tree editable="bottom" string="Translations">
<tree string="Translations">
<field name="src"/>
<field name="value"/>
<field name="name"/>


@ -122,7 +122,8 @@ class report_xml(osv.osv):
('odt', 'odt'),
], string='Type', required=True),
'groups_id': fields.many2many('res.groups', 'res_groups_report_rel', 'uid', 'gid', 'Groups'),
'attachment': fields.char('Save As Attachment Prefix', size=32, help='This is the prefix of the file name the print will be saved as attachement. Keep empty to not save the printed reports')
'attachment': fields.char('Save As Attachment Prefix', size=128, help='This is the filename of the attachment used to store the printing result. Keep empty to not save the printed reports. You can use a python expression with the object and time variables.'),
'attachment_use': fields.boolean('Reload from Attachment', help='If you check this, then the second time the user prints with the same attachment name, it returns the previous report.')
}
_defaults = {
'type': lambda *a: 'ir.actions.report.xml',
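
As the help text above says, the attachment prefix is evaluated as a python expression with object and time in scope; a hypothetical value for an invoice-like report:

    # hypothetical 'attachment' expression: one file name per printed record
    attachment = "'invoice_' + (object.number or '')"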


@ -380,6 +380,7 @@ class ir_model_access(osv.osv):
return res
def create(self, cr, uid, *args, **argv):
self.call_cache_clearing_methods()
res = super(ir_model_access, self).create(cr, uid, *args, **argv)
self.check.clear_cache(cr.dbname) # clear the cache of check function
return res


@ -84,7 +84,6 @@ class ir_ui_menu(osv.osv):
for menu in self.browse(cr, uid, ids):
# this key works because user access rights are all based on user's groups (cfr ir_model_access.check)
key = (cr.dbname, menu.id, tuple(user_groups))
if key in self._cache:
if self._cache[key]:
result.append(menu.id)
@ -191,8 +190,8 @@ class ir_ui_menu(osv.osv):
if context is None:
context = {}
ctx = context.copy()
if 'read_delta' in ctx:
del ctx['read_delta']
if self.CONCURRENCY_CHECK_FIELD in ctx:
del ctx[self.CONCURRENCY_CHECK_FIELD]
values_obj = self.pool.get('ir.values')
values_ids = values_obj.search(cursor, user, [
('model', '=', self._name), ('key', '=', 'action'),


@ -43,8 +43,8 @@ class ir_values(osv.osv):
if context is None:
context = {}
ctx = context.copy()
if 'read_delta' in ctx:
del ctx['read_delta']
if self.CONCURRENCY_CHECK_FIELD in ctx:
del ctx[self.CONCURRENCY_CHECK_FIELD]
if not self.browse(cursor, user, id, context=context).object:
value = pickle.dumps(eval(value))
self.write(cursor, user, id, {name[:-9]: value}, context=ctx)


@ -462,9 +462,12 @@ class module(osv.osv):
for mod in self.browse(cr, uid, ids):
if mod.state != 'installed':
continue
modpath = addons.get_module_path(mod.name)
if not modpath:
# unable to find the module; skip it
continue
for lang in filter_lang:
f = os.path.join(addons.get_module_path(mod.name), 'i18n', lang + '.po')
f = os.path.join(modpath, 'i18n', lang + '.po')
if os.path.exists(f):
logger.notifyChannel("init", netsvc.LOG_INFO, 'module %s: loading translation file for language %s' % (mod.name, lang))
tools.trans_load(cr.dbname, f, lang, verbose=False)


@ -28,7 +28,7 @@ class Country(osv.osv):
_description = 'Country'
_columns = {
'name': fields.char('Country Name', size=64,
help='The full name of the country.', required=True),
help='The full name of the country.', required=True, translate=True),
'code': fields.char('Country Code', size=2,
help='The ISO country code in two chars.\n'
'You can use this field for quick search.', required=True),


@ -104,7 +104,7 @@ def _contact_title_get(self, cr, uid, context={}):
obj = self.pool.get('res.partner.title')
ids = obj.search(cr, uid, [('domain', '=', 'contact')])
res = obj.read(cr, uid, ids, ['shortcut','name'], context)
return [(r['shortcut'], r['name']) for r in res]
return [(r['shortcut'], r['name']) for r in res] + [('','')]
def _partner_title_get(self, cr, uid, context={}):
obj = self.pool.get('res.partner.title')
@ -116,7 +116,7 @@ def _lang_get(self, cr, uid, context={}):
obj = self.pool.get('res.lang')
ids = obj.search(cr, uid, [], context=context)
res = obj.read(cr, uid, ids, ['code', 'name'], context)
return [(r['code'], r['name']) for r in res]
return [(r['code'], r['name']) for r in res] + [('','')]
class res_partner(osv.osv):
@ -132,7 +132,7 @@ class res_partner(osv.osv):
'ref': fields.char('Code', size=64),
'lang': fields.selection(_lang_get, 'Language', size=5, help="If the selected language is loaded in the system, all documents related to this partner will be printed in this language. If not, they will be printed in English."),
'user_id': fields.many2one('res.users', 'Dedicated Salesman', help='The internal user that is in charge of communicating with this partner if any.'),
'vat': fields.char('VAT',size=32 ,help="Value Added Tax number"),
'vat': fields.char('VAT', size=32, help="Value Added Tax number. Check the box if the partner is subject to VAT. Used by the VAT legal statement."),
'bank_ids': fields.one2many('res.partner.bank', 'partner_id', 'Banks'),
'website': fields.char('Website',size=64),
'comment': fields.text('Notes'),
@ -285,8 +285,8 @@ class res_partner_address(osv.osv):
'street2': fields.char('Street2', size=128),
'zip': fields.char('Zip', change_default=True, size=24),
'city': fields.char('City', size=128),
'state_id': fields.many2one("res.country.state", 'State', change_default=True, domain="[('country_id','=',country_id)]"),
'country_id': fields.many2one('res.country', 'Country', change_default=True),
'state_id': fields.many2one("res.country.state", 'Fed. State', domain="[('country_id','=',country_id)]"),
'country_id': fields.many2one('res.country', 'Country'),
'email': fields.char('E-Mail', size=240),
'phone': fields.char('Phone', size=64),
'fax': fields.char('Fax', size=64),
@ -328,6 +328,10 @@ class res_partner_address(osv.osv):
ids += self.search(cr, user, [('name',operator,name)] + args, limit=limit, context=context)
ids += self.search(cr, user, [('partner_id',operator,name)] + args, limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def get_city(self, cr, uid, id):
return self.browse(cr, uid, id).city
res_partner_address()
class res_partner_bank_type(osv.osv):


@ -170,10 +170,10 @@ class users(osv.osv):
self.pool.get('ir.rule').domain_get.clear_cache(cr.dbname)
return res
def unlink(self, cr, uid, ids):
def unlink(self, cr, uid, ids, context=None):
if 1 in ids:
raise osv.except_osv(_('Can not remove root user!'), _('You can not remove the admin user as it is used internally for resources created by OpenERP (updates, module installation, ...)'))
return super(users, self).unlink(cr, uid, ids)
return super(users, self).unlink(cr, uid, ids, context=context)
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=80):
if not args:


@ -37,6 +37,7 @@
<rng:optional><rng:attribute name="position"/></rng:optional>
<rng:optional><rng:attribute name="link"/></rng:optional>
<rng:optional><rng:attribute name="type"/></rng:optional>
<rng:optional><rng:attribute name="on_write"/></rng:optional>
<rng:zeroOrMore>
<rng:choice>
<rng:ref name="field"/>

bin/addons/gen_graph.sh (new executable file, 32 lines)

@ -0,0 +1,32 @@
#!/bin/bash
##############################################################################
#
# Copyright (c) 2004-2009 TINY SPRL. (http://tiny.be)
#
# $Id$
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contact a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
./module_graph.py "$@" | dot -Tpng -o module_graph.png
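
Hypothetical invocation, assuming Graphviz's dot is on the PATH and the script is run from bin/addons: ./gen_graph.sh base account, which writes module_graph.png.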


@ -21,6 +21,7 @@
#
##############################################################################
# TODO handle the case of zip modules
import os
import sys
@ -46,6 +47,8 @@ while len(modules):
for name in info['depends']:
if name not in done+modules:
modules.append(name)
if not os.path.exists(name):
print '\t%s [color=red]' % (name,)
print '\t%s -> %s;' % (f, name)
print '}'


@ -106,6 +106,7 @@
<rng:optional> <rng:attribute name="auto" /> </rng:optional>
<rng:optional> <rng:attribute name="header" /> </rng:optional>
<rng:optional> <rng:attribute name="attachment" /> </rng:optional>
<rng:optional> <rng:attribute name="attachment_use" /> </rng:optional>
<rng:optional> <rng:attribute name="groups"/> </rng:optional>
<rng:empty />
</rng:element>


@ -228,7 +228,10 @@ class OpenERPDispatcher:
return result
except Exception, e:
self.log('exception', tools.exception_to_unicode(e))
tb = sys.exc_info()
if hasattr(e, 'traceback'):
tb = e.traceback
else:
tb = sys.exc_info()
tb_s = "".join(traceback.format_exception(*tb))
if tools.config['debug_mode']:
import pdb
@ -334,7 +337,7 @@ class HttpDaemon(threading.Thread):
Logger().notifyChannel('xml-rpc-ssl', LOG_CRITICAL, "Can not load the certificate and/or the private key files")
sys.exit(1)
except Exception, e:
Logger().notifyChannel('xml-rpc', LOG_CRITICAL, "Error occur when strarting the server daemon: %s" % (e,))
Logger().notifyChannel('xml-rpc', LOG_CRITICAL, "An error occurred while starting the server daemon: %s" % (e,))
sys.exit(1)


@ -71,7 +71,9 @@ import tools
logger.notifyChannel("server", netsvc.LOG_INFO, "version - %s" % release.version )
for name, value in [('addons_path', tools.config['addons_path']),
('database hostname', tools.config['db_host'] or 'localhost')]:
('database hostname', tools.config['db_host'] or 'localhost'),
('database port', tools.config['db_port'] or '5432'),
('database user', tools.config['db_user'])]:
logger.notifyChannel("server", netsvc.LOG_INFO, "%s - %s" % ( name, value ))
import time


@ -34,8 +34,7 @@ class expression(object):
"""
def _is_operator(self, element):
return isinstance(element, str) \
and element in ['&', '|', '!']
return isinstance(element, (str, unicode)) and element in ['&', '|', '!']
def _is_leaf(self, element, internal=False):
OPS = ('=', '!=', '<>', '<=', '<', '>', '>=', '=like', 'like', 'not like', 'ilike', 'not ilike', 'in', 'not in', 'child_of')
@ -192,7 +191,9 @@ class expression(object):
self.__exp = self.__exp[:i] + dom + self.__exp[i+1:]
else:
if isinstance(right, basestring): # and not isinstance(field, fields.related):
res_ids = field_obj.name_search(cr, uid, right, [], operator, limit=None)
c = context.copy()
c['active_test'] = False
res_ids = field_obj.name_search(cr, uid, right, [], operator, limit=None, context=c)
right = map(lambda x: x[0], res_ids)
self.__exp[i] = (left, 'in', right)
else:
@ -244,7 +245,7 @@ class expression(object):
if len_after:
if left == 'id':
instr = ','.join(['%s'] * len_after)
else:
instr = ','.join([table._columns[left]._symbol_set[0]] * len_after)
query = '(%s.%s %s (%s))' % (table._table, left, operator, instr)
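
For context, a hypothetical domain in the prefix notation these helpers parse; '&' between plain leaves is implicit, and '|' and '!' are the other operators _is_operator accepts:

    # partners named like 'john' OR with parent company 1 or 2 (hypothetical)
    domain = ['|', ('name', 'ilike', 'john'), ('parent_id', 'in', [1, 2])]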


@ -290,6 +290,8 @@ def get_pg_type(f):
f_type = ('float8', 'DOUBLE PRECISION')
elif isinstance(f, fields.function) and f._type == 'selection':
f_type = ('text', 'text')
elif isinstance(f, fields.function) and f._type == 'char':
f_type = ('varchar', 'VARCHAR(%d)' % (f.size))
else:
logger = netsvc.Logger()
logger.notifyChannel("init", netsvc.LOG_WARNING, '%s type not supported!' % (type(f)))
@ -305,6 +307,7 @@ class orm_template(object):
_rec_name = 'name'
_parent_name = 'parent_id'
_parent_store = False
_parent_order = False
_date_name = 'date'
_order = 'id'
_sequence = None
@ -312,6 +315,8 @@ class orm_template(object):
_inherits = {}
_table = None
_invalids = set()
CONCURRENCY_CHECK_FIELD = '__last_update'
def _field_create(self, cr, context={}):
cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
@ -449,7 +454,7 @@ class orm_template(object):
break
i += 1
if i == len(f):
data[fpos] = str(r or '')
data[fpos] = tools.ustr(r or '')
return [data] + lines
def export_data(self, cr, uid, ids, fields, context=None):
@ -490,7 +495,7 @@ class orm_template(object):
if line[i]:
if fields_def[field[len(prefix)][:-3]]['type']=='many2many':
res_id = []
for word in line[i].split(','):
for word in line[i].split(config.get('csv_internal_sep')):
if '.' in word:
module, xml_id = word.rsplit('.', 1)
else:
@ -562,7 +567,7 @@ class orm_template(object):
res = []
if line[i]:
relation = fields_def[field[len(prefix)]]['relation']
for word in line[i].split(','):
for word in line[i].split(config.get('csv_internal_sep')):
res2 = self.pool.get(relation).name_search(cr,
uid, word, [], operator='=')
res3 = (res2 and res2[0][0]) or False
@ -781,7 +786,7 @@ class orm_template(object):
def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
return False
def __view_look_dom(self, cr, user, node, context=None):
def __view_look_dom(self, cr, user, node, view_id, context=None):
if not context:
context = {}
result = False
@ -807,7 +812,7 @@ class orm_template(object):
node.removeChild(f)
ctx = context.copy()
ctx['base_model_name'] = self._name
xarch, xfields = self.pool.get(relation).__view_look_dom_arch(cr, user, f, ctx)
xarch, xfields = self.pool.get(relation).__view_look_dom_arch(cr, user, f, view_id, ctx)
views[str(f.localName)] = {
'arch': xarch,
'fields': xfields
@ -857,45 +862,47 @@ class orm_template(object):
if childs:
for f in node.childNodes:
fields.update(self.__view_look_dom(cr, user, f, context))
if ('state' not in fields) and (('state' in self._columns) or ('state' in self._inherit_fields)):
fields['state'] = {}
fields.update(self.__view_look_dom(cr, user, f, view_id, context))
return fields
def __view_look_dom_arch(self, cr, user, node, context=None):
fields_def = self.__view_look_dom(cr, user, node, context=context)
def __view_look_dom_arch(self, cr, user, node, view_id, context=None):
fields_def = self.__view_look_dom(cr, user, node, view_id, context=context)
rolesobj = self.pool.get('res.roles')
usersobj = self.pool.get('res.users')
buttons = xpath.Evaluate('//button', node)
if buttons:
for button in buttons:
if button.getAttribute('type') == 'object':
continue
buttons = xpath.Evaluate("//button[@type != 'object']", node)
for button in buttons:
ok = True
if user != 1: # admin user has all roles
user_roles = usersobj.read(cr, user, [user], ['roles_id'])[0]['roles_id']
cr.execute("select role_id from wkf_transition where signal=%s", (button.getAttribute('name'),))
roles = cr.fetchall()
for role in roles:
if role[0]:
ok = ok and rolesobj.check(cr, user, user_roles, role[0])
ok = True
if user != 1: # admin user has all roles
user_roles = usersobj.read(cr, user, [user], ['roles_id'])[0]['roles_id']
cr.execute("select role_id from wkf_transition where signal=%s", (button.getAttribute('name'),))
roles = cr.fetchall()
for role in roles:
if role[0]:
ok = ok and rolesobj.check(cr, user, user_roles, role[0])
if not ok:
button.setAttribute('readonly', '1')
else:
button.setAttribute('readonly', '0')
if not ok:
button.setAttribute('readonly', '1')
else:
button.setAttribute('readonly', '0')
arch = node.toxml(encoding="utf-8").replace('\t', '')
fields = self.fields_get(cr, user, fields_def.keys(), context)
for field in fields:
if field in fields_def:
for field in fields_def:
if field in fields:
fields[field].update(fields_def[field])
else:
cr.execute('select name, model from ir_ui_view where (id=%s or inherit_id=%s) and arch like %s', (view_id, view_id, '%%%s%%' % field))
res = cr.fetchall()[:]
model = res[0][1]
res.insert(0, ("Can't find field '%s' in the following view parts composing the view of object model '%s':" % (field, model), None))
msg = "\n * ".join([r[0] for r in res])
msg += "\n\nEither you wrongly customized this view, or some modules bringing those views are not compatible with your current data model"
netsvc.Logger().notifyChannel('orm', netsvc.LOG_ERROR, msg)
raise except_orm('View error', msg)
return arch, fields
def __get_default_calendar_view(self):
@ -1082,7 +1089,7 @@ class orm_template(object):
result['view_id'] = 0
doc = dom.minidom.parseString(encode(result['arch']))
xarch, xfields = self.__view_look_dom_arch(cr, user, doc, context=context)
xarch, xfields = self.__view_look_dom_arch(cr, user, doc, view_id, context=context)
result['arch'] = xarch
result['fields'] = xfields
if toolbar:
@ -1367,7 +1374,6 @@ class orm_memory(orm_template):
class orm(orm_template):
_sql_constraints = []
_log_access = True
_table = None
_protected = ['read','write','create','default_get','perm_read','unlink','fields_get','fields_view_get','search','name_get','distinct_field_get','name_search','copy','import_data','search_count']
@ -1379,6 +1385,8 @@ class orm(orm_template):
where = self._parent_name+'='+str(root)
if not root:
where = self._parent_name+' IS NULL'
if self._parent_order:
where += ' order by '+self._parent_order
cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
pos2 = pos + 1
childs = cr.fetchall()
@ -1679,6 +1687,11 @@ class orm(orm_template):
def __init__(self, cr):
super(orm, self).__init__(cr)
if not hasattr(self, '_log_access'):
# if _log_access is not specified, it defaults to the same value as _auto
self._log_access = not hasattr(self, "_auto") or self._auto
self._columns = self._columns.copy()
for store_field in self._columns:
f = self._columns[store_field]
@ -1873,13 +1886,20 @@ class orm(orm_template):
d1, d2 = self.pool.get('ir.rule').domain_get(cr, user, self._name)
# all inherited fields + all non inherited fields for which the attribute whose name is in load is True
fields_pre = filter(lambda x: x in self._columns and getattr(self._columns[x], '_classic_write'), fields_to_read) + self._inherits.values()
fields_pre = [f for f in fields_to_read if
f == self.CONCURRENCY_CHECK_FIELD
or (f in self._columns and getattr(self._columns[f], '_classic_write'))
] + self._inherits.values()
res = []
if len(fields_pre):
def convert_field(f):
if f in ('create_date', 'write_date'):
return "date_trunc('second', %s) as %s" % (f, f)
if f == self.CONCURRENCY_CHECK_FIELD:
if self._log_access:
return "COALESCE(write_date, create_date, now())::timestamp AS %s" % (f,)
return "now()::timestamp AS %s" % (f,)
if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
return "length(%s) as %s" % (f,f)
return '"%s"' % (f,)
@ -1904,6 +1924,8 @@ class orm(orm_template):
res = map(lambda x: {'id': x}, ids)
for f in fields_pre:
if f == self.CONCURRENCY_CHECK_FIELD:
continue
if self._columns[f].translate:
ids = map(lambda x: x['id'], res)
res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context.get('lang', False) or 'en_US', ids)
@ -2024,26 +2046,32 @@ class orm(orm_template):
return res[ids]
return res
def unlink(self, cr, uid, ids, context=None):
def _check_concurrency(self, cr, ids, context):
if not context:
context = {}
return
if context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access:
def key(oid):
return "%s,%s" % (self._name, oid)
santa = "(id = %s AND %s < COALESCE(write_date, create_date, now())::timestamp)"
for i in range(0, len(ids), cr.IN_MAX):
sub_ids = tools.flatten(((oid, context[self.CONCURRENCY_CHECK_FIELD][key(oid)])
for oid in ids[i:i+cr.IN_MAX]
if key(oid) in context[self.CONCURRENCY_CHECK_FIELD]))
if sub_ids:
cr.execute("SELECT count(1) FROM %s WHERE %s" % (self._table, " OR ".join([santa]*(len(sub_ids)/2))), sub_ids)
res = cr.fetchone()
if res and res[0]:
raise except_orm('ConcurrencyException', _('Records were modified in the meanwhile'))
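
A sketch of how a caller arms this check, following the key() format above; the model name, id and timestamp are hypothetical, and the timestamp is the __last_update value previously read for that record:

    context = {
        '__last_update': {  # CONCURRENCY_CHECK_FIELD
            'res.partner,42': '2009-01-22 17:59:10',  # "model,id" -> value read earlier
        },
    }
    # write()/unlink() with this context raise 'ConcurrencyException'
    # if the row was modified since that timestamp.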
def unlink(self, cr, uid, ids, context=None):
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
result_store = self._store_get_values(cr, uid, ids, None, context)
delta = context.get('read_delta', False)
if delta and self._log_access:
for i in range(0, len(ids), cr.IN_MAX):
sub_ids = ids[i:i+cr.IN_MAX]
cr.execute("select (now() - min(write_date)) <= '%s'::interval " \
"from \"%s\" where id in (%s)" %
(delta, self._table, ",".join(map(str, sub_ids))))
res = cr.fetchone()
if res and res[0]:
raise except_orm(_('ConcurrencyException'),
_('This record was modified in the meanwhile'))
self._check_concurrency(cr, ids, context)
self.pool.get('ir.model.access').check(cr, uid, self._name, 'unlink')
@ -2124,24 +2152,11 @@ class orm(orm_template):
return True
if isinstance(ids, (int, long)):
ids = [ids]
delta = context.get('read_delta', False)
if delta and self._log_access:
for i in range(0, len(ids), cr.IN_MAX):
sub_ids = ids[i:i+cr.IN_MAX]
cr.execute("select (now() - min(write_date)) <= '%s'::interval " \
"from %s where id in (%s)" %
(delta, self._table, ",".join(map(str, sub_ids))))
res = cr.fetchone()
if res and res[0]:
for field in vals:
if field in self._columns and self._columns[field]._classic_write:
raise except_orm(_('ConcurrencyException'),
_('This record was modified in the meanwhile'))
self._check_concurrency(cr, ids, context)
self.pool.get('ir.model.access').check(cr, user, self._name, 'write')
#for v in self._inherits.values():
# assert v not in vals, (v, vals)
upd0 = []
upd1 = []
upd_todo = []
@ -2289,10 +2304,6 @@ class orm(orm_template):
''', (leftbound,rightbound,cwidth,cleft,cright,treeshift,leftbound,rightbound,
cwidth,cleft,cright,treeshift,leftrange,rightrange))
if 'read_delta' in context:
del context['read_delta']
result = self._store_get_values(cr, user, ids, vals.keys(), context)
for order, object, ids, fields in result:
self.pool.get(object)._store_set_values(cr, user, ids, fields, context)
@ -2324,8 +2335,9 @@ class orm(orm_template):
for f in self._columns.keys(): # + self._inherit_fields.keys():
if not f in vals:
default.append(f)
for f in self._inherit_fields.keys():
if (not f in vals) and (not self._inherit_fields[f][0] in avoid_table):
if (not f in vals) and (self._inherit_fields[f][0] not in avoid_table):
default.append(f)
if len(default):
@ -2344,7 +2356,7 @@ class orm(orm_template):
(table, col, col_detail) = self._inherit_fields[v]
tocreate[table][v] = vals[v]
del vals[v]
# Try-except added to filter the creation of those records whose fields are readonly.
# Example: any dashboard which has all fields readonly (due to database views).
try:


@ -0,0 +1,2 @@
from pdf import PdfFileReader, PdfFileWriter
__all__ = ["pdf"]

bin/report/pyPdf/filters.py (new file, 252 lines)

@ -0,0 +1,252 @@
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2006, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Implementation of stream filters for PDF.
"""
__author__ = "Mathieu Fenniak"
__author_email__ = "biziqe@mathieu.fenniak.net"
from utils import PdfReadError
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import zlib
def decompress(data):
return zlib.decompress(data)
def compress(data):
return zlib.compress(data)
except ImportError:
# Unable to import zlib. Attempt to use the System.IO.Compression
# library from the .NET framework. (IronPython only)
import System
from System import IO, Collections, Array
def _string_to_bytearr(buf):
retval = Array.CreateInstance(System.Byte, len(buf))
for i in range(len(buf)):
retval[i] = ord(buf[i])
return retval
def _bytearr_to_string(bytes):
retval = ""
for i in range(bytes.Length):
retval += chr(bytes[i])
return retval
def _read_bytes(stream):
ms = IO.MemoryStream()
buf = Array.CreateInstance(System.Byte, 2048)
while True:
bytes = stream.Read(buf, 0, buf.Length)
if bytes == 0:
break
else:
ms.Write(buf, 0, bytes)
retval = ms.ToArray()
ms.Close()
return retval
def decompress(data):
bytes = _string_to_bytearr(data)
ms = IO.MemoryStream()
ms.Write(bytes, 0, bytes.Length)
ms.Position = 0 # fseek 0
gz = IO.Compression.DeflateStream(ms, IO.Compression.CompressionMode.Decompress)
bytes = _read_bytes(gz)
retval = _bytearr_to_string(bytes)
gz.Close()
return retval
def compress(data):
bytes = _string_to_bytearr(data)
ms = IO.MemoryStream()
gz = IO.Compression.DeflateStream(ms, IO.Compression.CompressionMode.Compress, True)
gz.Write(bytes, 0, bytes.Length)
gz.Close()
ms.Position = 0 # fseek 0
bytes = ms.ToArray()
retval = _bytearr_to_string(bytes)
ms.Close()
return retval
class FlateDecode(object):
def decode(data, decodeParms):
data = decompress(data)
predictor = 1
if decodeParms:
predictor = decodeParms.get("/Predictor", 1)
# predictor 1 == no predictor
if predictor != 1:
columns = decodeParms["/Columns"]
# PNG prediction:
if predictor >= 10 and predictor <= 15:
output = StringIO()
# PNG prediction can vary from row to row
rowlength = columns + 1
assert len(data) % rowlength == 0
prev_rowdata = (0,) * rowlength
for row in xrange(len(data) / rowlength):
rowdata = [ord(x) for x in data[(row*rowlength):((row+1)*rowlength)]]
filterByte = rowdata[0]
if filterByte == 0:
pass
elif filterByte == 1:
for i in range(2, rowlength):
rowdata[i] = (rowdata[i] + rowdata[i-1]) % 256
elif filterByte == 2:
for i in range(1, rowlength):
rowdata[i] = (rowdata[i] + prev_rowdata[i]) % 256
else:
# unsupported PNG filter
raise PdfReadError("Unsupported PNG filter %r" % filterByte)
prev_rowdata = rowdata
output.write(''.join([chr(x) for x in rowdata[1:]]))
data = output.getvalue()
else:
# unsupported predictor
raise PdfReadError("Unsupported flatedecode predictor %r" % predictor)
return data
decode = staticmethod(decode)
def encode(data):
return compress(data)
encode = staticmethod(encode)
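
A quick round-trip sanity check for the filter above; with no /Predictor entry the decoder simply returns the inflated bytes:

    assert FlateDecode.decode(FlateDecode.encode('hello'), None) == 'hello'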
class ASCIIHexDecode(object):
def decode(data, decodeParms=None):
retval = ""
char = ""
x = 0
while True:
c = data[x]
if c == ">":
break
elif c.isspace():
x += 1
continue
char += c
if len(char) == 2:
retval += chr(int(char, base=16))
char = ""
x += 1
assert char == ""
return retval
decode = staticmethod(decode)
class ASCII85Decode(object):
def decode(data, decodeParms=None):
retval = ""
group = []
x = 0
hitEod = False
# remove all whitespace from data
data = [y for y in data if not (y in ' \n\r\t')]
while not hitEod:
c = data[x]
if len(retval) == 0 and c == "<" and data[x+1] == "~":
x += 2
continue
#elif c.isspace():
# x += 1
# continue
elif c == 'z':
assert len(group) == 0
retval += '\x00\x00\x00\x00'
x += 1 # advance past 'z'; a bare 'continue' would skip the increment below and loop forever
continue
elif c == "~" and data[x+1] == ">":
if len(group) != 0:
# cannot have a final group of just 1 char
assert len(group) > 1
cnt = len(group) - 1
group += [ 85, 85, 85 ]
hitEod = cnt
else:
break
else:
c = ord(c) - 33
assert c >= 0 and c < 85
group += [ c ]
if len(group) >= 5:
b = group[0] * (85**4) + \
group[1] * (85**3) + \
group[2] * (85**2) + \
group[3] * 85 + \
group[4]
assert b < (2**32 - 1)
c4 = chr((b >> 0) % 256)
c3 = chr((b >> 8) % 256)
c2 = chr((b >> 16) % 256)
c1 = chr(b >> 24)
retval += (c1 + c2 + c3 + c4)
if hitEod:
retval = retval[:-4+hitEod]
group = []
x += 1
return retval
decode = staticmethod(decode)
def decodeStreamData(stream):
from generic import NameObject
filters = stream.get("/Filter", ())
if len(filters) and not isinstance(filters[0], NameObject):
# we have a single filter instance
filters = (filters,)
data = stream._data
for filterType in filters:
if filterType == "/FlateDecode":
data = FlateDecode.decode(data, stream.get("/DecodeParms"))
elif filterType == "/ASCIIHexDecode":
data = ASCIIHexDecode.decode(data)
elif filterType == "/ASCII85Decode":
data = ASCII85Decode.decode(data)
elif filterType == "/Crypt":
decodeParams = stream.get("/DecodeParams", {})
if "/Name" not in decodeParams and "/Type" not in decodeParams:
pass
else:
raise NotImplementedError("/Crypt filter with /Name or /Type not supported yet")
else:
# unsupported filter
raise NotImplementedError("unsupported filter %s" % filterType)
return data
if __name__ == "__main__":
assert "abc" == ASCIIHexDecode.decode('61\n626\n3>')
ascii85Test = """
<~9jqo^BlbD-BleB1DJ+*+F(f,q/0JhKF<GL>Cj@.4Gp$d7F!,L7@<6@)/0JDEF<G%<+EV:2F!,
O<DJ+*.@<*K0@<6L(Df-\\0Ec5e;DffZ(EZee.Bl.9pF"AGXBPCsi+DGm>@3BB/F*&OCAfu2/AKY
i(DIb:@FD,*)+C]U=@3BN#EcYf8ATD3s@q?d$AftVqCh[NqF<G:8+EV:.+Cf>-FD5W8ARlolDIa
l(DId<j@<?3r@:F%a+D58'ATD4$Bl@l3De:,-DJs`8ARoFb/0JMK@qB4^F!,R<AKZ&-DfTqBG%G
>uD.RTpAKYo'+CT/5+Cei#DII?(E,9)oF*2M7/c~>
"""
ascii85_originalText="Man is distinguished, not only by his reason, but by this singular passion from other animals, which is a lust of the mind, that by a perseverance of delight in the continued and indefatigable generation of knowledge, exceeds the short vehemence of any carnal pleasure."
assert ASCII85Decode.decode(ascii85Test) == ascii85_originalText

bin/report/pyPdf/generic.py (new file, 780 lines)

@ -0,0 +1,780 @@
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2006, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Implementation of generic PDF objects (dictionary, number, string, and so on)
"""
__author__ = "Mathieu Fenniak"
__author_email__ = "biziqe@mathieu.fenniak.net"
import re
from utils import readNonWhitespace, RC4_encrypt
import filters
import utils
import decimal
import codecs
def readObject(stream, pdf):
tok = stream.read(1)
stream.seek(-1, 1) # reset to start
if tok == 't' or tok == 'f':
# boolean object
return BooleanObject.readFromStream(stream)
elif tok == '(':
# string object
return readStringFromStream(stream)
elif tok == '/':
# name object
return NameObject.readFromStream(stream)
elif tok == '[':
# array object
return ArrayObject.readFromStream(stream, pdf)
elif tok == 'n':
# null object
return NullObject.readFromStream(stream)
elif tok == '<':
# hexadecimal string OR dictionary
peek = stream.read(2)
stream.seek(-2, 1) # reset to start
if peek == '<<':
return DictionaryObject.readFromStream(stream, pdf)
else:
return readHexStringFromStream(stream)
elif tok == '%':
# comment
while tok not in ('\r', '\n'):
tok = stream.read(1)
tok = readNonWhitespace(stream)
stream.seek(-1, 1)
return readObject(stream, pdf)
else:
# number object OR indirect reference
if tok == '+' or tok == '-':
# number
return NumberObject.readFromStream(stream)
peek = stream.read(20)
stream.seek(-len(peek), 1) # reset to start
if re.match(r"(\d+)\s(\d+)\sR[^a-zA-Z]", peek) != None:
return IndirectObject.readFromStream(stream, pdf)
else:
return NumberObject.readFromStream(stream)
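
A tiny driver for the dispatcher above; simple objects like booleans never consult the pdf argument, so None suffices here (hypothetical input):

    from StringIO import StringIO
    obj = readObject(StringIO('true'), None)  # BooleanObject; obj.value is True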
class PdfObject(object):
def getObject(self):
"""Resolves indirect references."""
return self
class NullObject(PdfObject):
def writeToStream(self, stream, encryption_key):
stream.write("null")
def readFromStream(stream):
nulltxt = stream.read(4)
if nulltxt != "null":
raise utils.PdfReadError, "error reading null object"
return NullObject()
readFromStream = staticmethod(readFromStream)
class BooleanObject(PdfObject):
def __init__(self, value):
self.value = value
def writeToStream(self, stream, encryption_key):
if self.value:
stream.write("true")
else:
stream.write("false")
def readFromStream(stream):
word = stream.read(4)
if word == "true":
return BooleanObject(True)
elif word == "fals":
stream.read(1)
return BooleanObject(False)
assert False
readFromStream = staticmethod(readFromStream)
class ArrayObject(list, PdfObject):
def writeToStream(self, stream, encryption_key):
stream.write("[")
for data in self:
stream.write(" ")
data.writeToStream(stream, encryption_key)
stream.write(" ]")
def readFromStream(stream, pdf):
arr = ArrayObject()
tmp = stream.read(1)
if tmp != "[":
raise utils.PdfReadError, "error reading array"
while True:
# skip leading whitespace
tok = stream.read(1)
while tok.isspace():
tok = stream.read(1)
stream.seek(-1, 1)
# check for array ending
peekahead = stream.read(1)
if peekahead == "]":
break
stream.seek(-1, 1)
# read and append obj
arr.append(readObject(stream, pdf))
return arr
readFromStream = staticmethod(readFromStream)
class IndirectObject(PdfObject):
def __init__(self, idnum, generation, pdf):
self.idnum = idnum
self.generation = generation
self.pdf = pdf
def getObject(self):
return self.pdf.getObject(self).getObject()
def __repr__(self):
return "IndirectObject(%r, %r)" % (self.idnum, self.generation)
def __eq__(self, other):
return (
other != None and
isinstance(other, IndirectObject) and
self.idnum == other.idnum and
self.generation == other.generation and
self.pdf is other.pdf
)
def __ne__(self, other):
return not self.__eq__(other)
def writeToStream(self, stream, encryption_key):
stream.write("%s %s R" % (self.idnum, self.generation))
def readFromStream(stream, pdf):
idnum = ""
while True:
tok = stream.read(1)
if tok.isspace():
break
idnum += tok
generation = ""
while True:
tok = stream.read(1)
if tok.isspace():
break
generation += tok
r = stream.read(1)
if r != "R":
raise utils.PdfReadError("error reading indirect object reference")
return IndirectObject(int(idnum), int(generation), pdf)
readFromStream = staticmethod(readFromStream)
class FloatObject(decimal.Decimal, PdfObject):
def __new__(cls, value="0", context=None):
return decimal.Decimal.__new__(cls, str(value), context)
def __repr__(self):
return str(self)
def writeToStream(self, stream, encryption_key):
stream.write(str(self))
class NumberObject(int, PdfObject):
def __init__(self, value):
int.__init__(self, value)
def writeToStream(self, stream, encryption_key):
stream.write(repr(self))
def readFromStream(stream):
name = ""
while True:
tok = stream.read(1)
if tok != '+' and tok != '-' and tok != '.' and not tok.isdigit():
stream.seek(-1, 1)
break
name += tok
if name.find(".") != -1:
return FloatObject(name)
else:
return NumberObject(name)
readFromStream = staticmethod(readFromStream)
##
# Given a string (either a "str" or "unicode"), create a ByteStringObject or a
# TextStringObject to represent the string.
def createStringObject(string):
if isinstance(string, unicode):
return TextStringObject(string)
elif isinstance(string, str):
if string.startswith(codecs.BOM_UTF16_BE):
retval = TextStringObject(string.decode("utf-16"))
retval.autodetect_utf16 = True
return retval
else:
# This is probably a big performance hit here, but we need to
# convert string objects into the text/unicode-aware version if
# possible... and the only way to check if that's possible is
# to try. Some strings are strings, some are just byte arrays.
try:
retval = TextStringObject(decode_pdfdocencoding(string))
retval.autodetect_pdfdocencoding = True
return retval
except UnicodeDecodeError:
return ByteStringObject(string)
else:
raise TypeError("createStringObject should have str or unicode arg")
def readHexStringFromStream(stream):
stream.read(1)
txt = ""
x = ""
while True:
tok = readNonWhitespace(stream)
if tok == ">":
break
x += tok
if len(x) == 2:
txt += chr(int(x, base=16))
x = ""
if len(x) == 1:
x += "0"
if len(x) == 2:
txt += chr(int(x, base=16))
return createStringObject(txt)
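# For illustration (hypothetical data): "<68656C6C6F>" decodes pair by pair
# to 'hello'; an odd trailing digit is padded with "0":
#
#     from StringIO import StringIO
#     readHexStringFromStream(StringIO("<68656C6C6F>"))    # -> u'hello'
#     readHexStringFromStream(StringIO("<68656C6C6F7>"))   # -> u'hellop' (7 -> 0x70)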
def readStringFromStream(stream):
tok = stream.read(1)
parens = 1
txt = ""
while True:
tok = stream.read(1)
if tok == "(":
parens += 1
elif tok == ")":
parens -= 1
if parens == 0:
break
elif tok == "\\":
tok = stream.read(1)
if tok == "n":
tok = "\n"
elif tok == "r":
tok = "\r"
elif tok == "t":
tok = "\t"
elif tok == "b":
tok == "\b"
elif tok == "f":
tok = "\f"
elif tok == "(":
tok = "("
elif tok == ")":
tok = ")"
elif tok == "\\":
tok = "\\"
elif tok.isdigit():
tok += stream.read(2)
tok = chr(int(tok, base=8))
elif tok in "\n\r":
# This case is hit when a backslash followed by a line
# break occurs. If it's a multi-char EOL, consume the
# second character:
tok = stream.read(1)
if tok not in "\n\r":
stream.seek(-1, 1)
# Then don't add anything to the actual string, since this
# line break was escaped:
tok = ''
else:
raise utils.PdfReadError("Unexpected escaped string")
txt += tok
return createStringObject(txt)
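# For illustration (hypothetical literals): standard escapes and octal
# escapes are translated, and balanced inner parentheses need no escaping:
#
#     from StringIO import StringIO
#     readStringFromStream(StringIO(r"(a\(b\))"))   # -> u'a(b)'
#     readStringFromStream(StringIO(r"(\101)"))     # -> u'A' (octal 101)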
##
# Represents a string object where the text encoding could not be determined.
# This occurs quite often, as the PDF spec doesn't provide an alternate way to
# represent strings -- for example, the encryption data stored in files (like
# /O) is clearly not text, but is still stored in a "String" object.
class ByteStringObject(str, PdfObject):
##
# For compatibility with TextStringObject.original_bytes. This method
# returns self.
original_bytes = property(lambda self: self)
def writeToStream(self, stream, encryption_key):
bytearr = self
if encryption_key:
bytearr = RC4_encrypt(encryption_key, bytearr)
stream.write("<")
stream.write(bytearr.encode("hex"))
stream.write(">")
##
# Represents a string object that has been decoded into a real unicode string.
# If read from a PDF document, this string appeared to match the
# PDFDocEncoding, or contained a UTF-16BE BOM mark to cause UTF-16 decoding to
# occur.
class TextStringObject(unicode, PdfObject):
autodetect_pdfdocencoding = False
autodetect_utf16 = False
##
# It is occasionally possible that a text string object gets created where
# a byte string object was expected due to the autodetection mechanism --
# if that occurs, this "original_bytes" property can be used to
# back-calculate what the original encoded bytes were.
original_bytes = property(lambda self: self.get_original_bytes())
def get_original_bytes(self):
# We're a text string object, but the library is trying to get our raw
# bytes. This can happen if we auto-detected this string as text, but
# we were wrong. It's pretty common. Return the original bytes that
# would have been used to create this object, based upon the autodetect
# method.
if self.autodetect_utf16:
return codecs.BOM_UTF16_BE + self.encode("utf-16be")
elif self.autodetect_pdfdocencoding:
return encode_pdfdocencoding(self)
else:
raise Exception("no information about original bytes")
def writeToStream(self, stream, encryption_key):
# Try to write the string out as a PDFDocEncoding encoded string. It's
# nicer to look at in the PDF file. Sadly, we take a performance hit
# here for trying...
try:
bytearr = encode_pdfdocencoding(self)
except UnicodeEncodeError:
bytearr = codecs.BOM_UTF16_BE + self.encode("utf-16be")
if encryption_key:
bytearr = RC4_encrypt(encryption_key, bytearr)
obj = ByteStringObject(bytearr)
obj.writeToStream(stream, None)
else:
stream.write("(")
for c in bytearr:
if not c.isalnum() and c != ' ':
stream.write("\\%03o" % ord(c))
else:
stream.write(c)
stream.write(")")
class NameObject(str, PdfObject):
delimiterCharacters = "(", ")", "<", ">", "[", "]", "{", "}", "/", "%"
def __init__(self, data):
str.__init__(self, data)
def writeToStream(self, stream, encryption_key):
stream.write(self)
def readFromStream(stream):
name = stream.read(1)
if name != "/":
raise utils.PdfReadError("name read error")
while True:
tok = stream.read(1)
if tok.isspace() or tok in NameObject.delimiterCharacters:
stream.seek(-1, 1)
break
name += tok
return NameObject(name)
readFromStream = staticmethod(readFromStream)
class DictionaryObject(dict, PdfObject):
def __init__(self, *args, **kwargs):
if len(args) == 0:
self.update(kwargs)
elif len(args) == 1:
arr = args[0]
# If we're passed a list/tuple, make a dict out of it
if not hasattr(arr, "iteritems"):
newarr = {}
for k, v in arr:
newarr[k] = v
arr = newarr
self.update(arr)
else:
raise TypeError("dict expected at most 1 argument, got 3")
def update(self, arr):
# note, a ValueError halfway through copying values
# will leave half the values in this dict.
for k, v in arr.iteritems():
self.__setitem__(k, v)
def raw_get(self, key):
return dict.__getitem__(self, key)
def __setitem__(self, key, value):
if not isinstance(key, PdfObject):
raise ValueError("key must be PdfObject")
if not isinstance(value, PdfObject):
raise ValueError("value must be PdfObject")
return dict.__setitem__(self, key, value)
def setdefault(self, key, value=None):
if not isinstance(key, PdfObject):
raise ValueError("key must be PdfObject")
if not isinstance(value, PdfObject):
raise ValueError("value must be PdfObject")
return dict.setdefault(self, key, value)
def __getitem__(self, key):
return dict.__getitem__(self, key).getObject()
##
# Retrieves XMP (Extensible Metadata Platform) data relevant to the
# this object, if available.
# <p>
# Stability: Added in v1.12, will exist for all future v1.x releases.
# @return Returns a {@link #xmp.XmpInformation XmpInformation} instance
# that can be used to access XMP metadata from the document. Can also
# return None if no metadata was found on the document root.
def getXmpMetadata(self):
metadata = self.get("/Metadata", None)
if metadata == None:
return None
metadata = metadata.getObject()
import xmp
if not isinstance(metadata, xmp.XmpInformation):
metadata = xmp.XmpInformation(metadata)
self[NameObject("/Metadata")] = metadata
return metadata
##
# Read-only property that accesses the {@link
# #DictionaryObject.getXmpMetadata getXmpMetadata} function.
# <p>
# Stability: Added in v1.12, will exist for all future v1.x releases.
xmpMetadata = property(lambda self: self.getXmpMetadata(), None, None)
def writeToStream(self, stream, encryption_key):
stream.write("<<\n")
for key, value in self.items():
key.writeToStream(stream, encryption_key)
stream.write(" ")
value.writeToStream(stream, encryption_key)
stream.write("\n")
stream.write(">>")
def readFromStream(stream, pdf):
tmp = stream.read(2)
if tmp != "<<":
raise utils.PdfReadError("dictionary read error")
data = {}
while True:
tok = readNonWhitespace(stream)
if tok == ">":
stream.read(1)
break
stream.seek(-1, 1)
key = readObject(stream, pdf)
tok = readNonWhitespace(stream)
stream.seek(-1, 1)
value = readObject(stream, pdf)
if data.has_key(key):
# multiple definitions of key not permitted
raise utils.PdfReadError("multiple definitions in dictionary")
data[key] = value
pos = stream.tell()
s = readNonWhitespace(stream)
if s == 's' and stream.read(5) == 'tream':
eol = stream.read(1)
# odd PDF file output has spaces after 'stream' keyword but before EOL.
# patch provided by Danial Sandler
while eol == ' ':
eol = stream.read(1)
assert eol in ("\n", "\r")
if eol == "\r":
# read \n after
stream.read(1)
# this is a stream object, not a dictionary
assert data.has_key("/Length")
length = data["/Length"]
if isinstance(length, IndirectObject):
t = stream.tell()
length = pdf.getObject(length)
stream.seek(t, 0)
data["__streamdata__"] = stream.read(length)
e = readNonWhitespace(stream)
ndstream = stream.read(8)
if (e + ndstream) != "endstream":
# (sigh) - the odd PDF file has a length that is too long, so
# we need to read backwards to find the "endstream" ending.
# ReportLab (unknown version) generates files with this bug,
# and Python users into PDF files tend to be our audience.
# we need to do this to correct the streamdata and chop off
# an extra character.
pos = stream.tell()
stream.seek(-10, 1)
end = stream.read(9)
if end == "endstream":
# we found it by looking back one character further.
data["__streamdata__"] = data["__streamdata__"][:-1]
else:
stream.seek(pos, 0)
raise utils.PdfReadError("Unable to find 'endstream' marker after stream.")
else:
stream.seek(pos, 0)
if data.has_key("__streamdata__"):
return StreamObject.initializeFromDictionary(data)
else:
retval = DictionaryObject()
retval.update(data)
return retval
readFromStream = staticmethod(readFromStream)
class StreamObject(DictionaryObject):
def __init__(self):
self._data = None
self.decodedSelf = None
def writeToStream(self, stream, encryption_key):
self[NameObject("/Length")] = NumberObject(len(self._data))
DictionaryObject.writeToStream(self, stream, encryption_key)
del self["/Length"]
stream.write("\nstream\n")
data = self._data
if encryption_key:
data = RC4_encrypt(encryption_key, data)
stream.write(data)
stream.write("\nendstream")
def initializeFromDictionary(data):
if data.has_key("/Filter"):
retval = EncodedStreamObject()
else:
retval = DecodedStreamObject()
retval._data = data["__streamdata__"]
del data["__streamdata__"]
del data["/Length"]
retval.update(data)
return retval
initializeFromDictionary = staticmethod(initializeFromDictionary)
def flateEncode(self):
if self.has_key("/Filter"):
f = self["/Filter"]
if isinstance(f, ArrayObject):
f.insert(0, NameObject("/FlateDecode"))
else:
newf = ArrayObject()
newf.append(NameObject("/FlateDecode"))
newf.append(f)
f = newf
else:
f = NameObject("/FlateDecode")
retval = EncodedStreamObject()
retval[NameObject("/Filter")] = f
retval._data = filters.FlateDecode.encode(self._data)
return retval
class DecodedStreamObject(StreamObject):
def getData(self):
return self._data
def setData(self, data):
self._data = data
class EncodedStreamObject(StreamObject):
def __init__(self):
self.decodedSelf = None
def getData(self):
if self.decodedSelf:
# cached version of decoded object
return self.decodedSelf.getData()
else:
# create decoded object
decoded = DecodedStreamObject()
decoded._data = filters.decodeStreamData(self)
for key, value in self.items():
if not key in ("/Length", "/Filter", "/DecodeParms"):
decoded[key] = value
self.decodedSelf = decoded
return decoded._data
def setData(self, data):
raise utils.PdfReadError("Creating EncodedStreamObject is not currently supported")
class RectangleObject(ArrayObject):
def __init__(self, arr):
# must have four points
assert len(arr) == 4
# automatically convert arr[x] into NumberObject(arr[x]) if necessary
ArrayObject.__init__(self, [self.ensureIsNumber(x) for x in arr])
def ensureIsNumber(self, value):
if not isinstance(value, (NumberObject, FloatObject)):
value = FloatObject(value)
return value
def __repr__(self):
return "RectangleObject(%s)" % repr(list(self))
def getLowerLeft_x(self):
return self[0]
def getLowerLeft_y(self):
return self[1]
def getUpperRight_x(self):
return self[2]
def getUpperRight_y(self):
return self[3]
def getUpperLeft_x(self):
return self.getLowerLeft_x()
def getUpperLeft_y(self):
return self.getUpperRight_y()
def getLowerRight_x(self):
return self.getUpperRight_x()
def getLowerRight_y(self):
return self.getLowerLeft_y()
def getLowerLeft(self):
return self.getLowerLeft_x(), self.getLowerLeft_y()
def getLowerRight(self):
return self.getLowerRight_x(), self.getLowerRight_y()
def getUpperLeft(self):
return self.getUpperLeft_x(), self.getUpperLeft_y()
def getUpperRight(self):
return self.getUpperRight_x(), self.getUpperRight_y()
def setLowerLeft(self, value):
self[0], self[1] = [self.ensureIsNumber(x) for x in value]
def setLowerRight(self, value):
self[2], self[1] = [self.ensureIsNumber(x) for x in value]
def setUpperLeft(self, value):
self[0], self[3] = [self.ensureIsNumber(x) for x in value]
def setUpperRight(self, value):
self[2], self[3] = [self.ensureIsNumber(x) for x in value]
lowerLeft = property(getLowerLeft, setLowerLeft, None, None)
lowerRight = property(getLowerRight, setLowerRight, None, None)
upperLeft = property(getUpperLeft, setUpperLeft, None, None)
upperRight = property(getUpperRight, setUpperRight, None, None)
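# For illustration (hypothetical media box): the four coordinates are
# lower-left x, lower-left y, upper-right x and upper-right y, in points:
#
#     box = RectangleObject([0, 0, 612, 792])   # US Letter
#     box.lowerLeft                             # -> (0, 0)
#     box.upperRight                            # -> (612, 792)
#     box.upperRight = (595, 842)               # resize to A4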
def encode_pdfdocencoding(unicode_string):
retval = ''
for c in unicode_string:
try:
retval += chr(_pdfDocEncoding_rev[c])
except KeyError:
raise UnicodeEncodeError("pdfdocencoding", c, -1, -1,
"does not exist in translation table")
return retval
def decode_pdfdocencoding(byte_array):
retval = u''
for b in byte_array:
c = _pdfDocEncoding[ord(b)]
if c == u'\u0000':
raise UnicodeDecodeError("pdfdocencoding", b, -1, -1,
"does not exist in translation table")
retval += c
return retval
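# For illustration (hypothetical value): the two helpers are inverses for
# any character that has a PDFDocEncoding mapping:
#
#     encode_pdfdocencoding(u'caf\xe9')   # -> 'caf\xe9'
#     decode_pdfdocencoding('caf\xe9')    # -> u'caf\xe9'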
_pdfDocEncoding = (
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000', u'\u0000',
u'\u02d8', u'\u02c7', u'\u02c6', u'\u02d9', u'\u02dd', u'\u02db', u'\u02da', u'\u02dc',
u'\u0020', u'\u0021', u'\u0022', u'\u0023', u'\u0024', u'\u0025', u'\u0026', u'\u0027',
u'\u0028', u'\u0029', u'\u002a', u'\u002b', u'\u002c', u'\u002d', u'\u002e', u'\u002f',
u'\u0030', u'\u0031', u'\u0032', u'\u0033', u'\u0034', u'\u0035', u'\u0036', u'\u0037',
u'\u0038', u'\u0039', u'\u003a', u'\u003b', u'\u003c', u'\u003d', u'\u003e', u'\u003f',
u'\u0040', u'\u0041', u'\u0042', u'\u0043', u'\u0044', u'\u0045', u'\u0046', u'\u0047',
u'\u0048', u'\u0049', u'\u004a', u'\u004b', u'\u004c', u'\u004d', u'\u004e', u'\u004f',
u'\u0050', u'\u0051', u'\u0052', u'\u0053', u'\u0054', u'\u0055', u'\u0056', u'\u0057',
u'\u0058', u'\u0059', u'\u005a', u'\u005b', u'\u005c', u'\u005d', u'\u005e', u'\u005f',
u'\u0060', u'\u0061', u'\u0062', u'\u0063', u'\u0064', u'\u0065', u'\u0066', u'\u0067',
u'\u0068', u'\u0069', u'\u006a', u'\u006b', u'\u006c', u'\u006d', u'\u006e', u'\u006f',
u'\u0070', u'\u0071', u'\u0072', u'\u0073', u'\u0074', u'\u0075', u'\u0076', u'\u0077',
u'\u0078', u'\u0079', u'\u007a', u'\u007b', u'\u007c', u'\u007d', u'\u007e', u'\u0000',
u'\u2022', u'\u2020', u'\u2021', u'\u2026', u'\u2014', u'\u2013', u'\u0192', u'\u2044',
u'\u2039', u'\u203a', u'\u2212', u'\u2030', u'\u201e', u'\u201c', u'\u201d', u'\u2018',
u'\u2019', u'\u201a', u'\u2122', u'\ufb01', u'\ufb02', u'\u0141', u'\u0152', u'\u0160',
u'\u0178', u'\u017d', u'\u0131', u'\u0142', u'\u0153', u'\u0161', u'\u017e', u'\u0000',
u'\u20ac', u'\u00a1', u'\u00a2', u'\u00a3', u'\u00a4', u'\u00a5', u'\u00a6', u'\u00a7',
u'\u00a8', u'\u00a9', u'\u00aa', u'\u00ab', u'\u00ac', u'\u0000', u'\u00ae', u'\u00af',
u'\u00b0', u'\u00b1', u'\u00b2', u'\u00b3', u'\u00b4', u'\u00b5', u'\u00b6', u'\u00b7',
u'\u00b8', u'\u00b9', u'\u00ba', u'\u00bb', u'\u00bc', u'\u00bd', u'\u00be', u'\u00bf',
u'\u00c0', u'\u00c1', u'\u00c2', u'\u00c3', u'\u00c4', u'\u00c5', u'\u00c6', u'\u00c7',
u'\u00c8', u'\u00c9', u'\u00ca', u'\u00cb', u'\u00cc', u'\u00cd', u'\u00ce', u'\u00cf',
u'\u00d0', u'\u00d1', u'\u00d2', u'\u00d3', u'\u00d4', u'\u00d5', u'\u00d6', u'\u00d7',
u'\u00d8', u'\u00d9', u'\u00da', u'\u00db', u'\u00dc', u'\u00dd', u'\u00de', u'\u00df',
u'\u00e0', u'\u00e1', u'\u00e2', u'\u00e3', u'\u00e4', u'\u00e5', u'\u00e6', u'\u00e7',
u'\u00e8', u'\u00e9', u'\u00ea', u'\u00eb', u'\u00ec', u'\u00ed', u'\u00ee', u'\u00ef',
u'\u00f0', u'\u00f1', u'\u00f2', u'\u00f3', u'\u00f4', u'\u00f5', u'\u00f6', u'\u00f7',
u'\u00f8', u'\u00f9', u'\u00fa', u'\u00fb', u'\u00fc', u'\u00fd', u'\u00fe', u'\u00ff'
)
assert len(_pdfDocEncoding) == 256
_pdfDocEncoding_rev = {}
for i in xrange(256):
char = _pdfDocEncoding[i]
if char == u"\u0000":
continue
assert char not in _pdfDocEncoding_rev
_pdfDocEncoding_rev[char] = i

1527 bin/report/pyPdf/pdf.py Normal file

File diff suppressed because it is too large

110 bin/report/pyPdf/utils.py Normal file
View File

@ -0,0 +1,110 @@
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2006, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Utility functions for PDF library.
"""
__author__ = "Mathieu Fenniak"
__author_email__ = "biziqe@mathieu.fenniak.net"
#ENABLE_PSYCO = False
#if ENABLE_PSYCO:
# try:
# import psyco
# except ImportError:
# ENABLE_PSYCO = False
#
#if not ENABLE_PSYCO:
# class psyco:
# def proxy(func):
# return func
# proxy = staticmethod(proxy)
def readUntilWhitespace(stream, maxchars=None):
txt = ""
while True:
tok = stream.read(1)
if tok.isspace() or not tok:
break
txt += tok
if len(txt) == maxchars:
break
return txt
def readNonWhitespace(stream):
tok = ' '
while tok == '\n' or tok == '\r' or tok == ' ' or tok == '\t':
tok = stream.read(1)
return tok
class ConvertFunctionsToVirtualList(object):
def __init__(self, lengthFunction, getFunction):
self.lengthFunction = lengthFunction
self.getFunction = getFunction
def __len__(self):
return self.lengthFunction()
def __getitem__(self, index):
if not isinstance(index, int):
raise TypeError("sequence indices must be integers")
len_self = len(self)
if index < 0:
# support negative indexes
index = len_self + index
if index < 0 or index >= len_self:
raise IndexError("sequence index out of range")
return self.getFunction(index)
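# For illustration (hypothetical backing list): any length/getter pair of
# callables behaves as a read-only sequence, including negative indexes:
#
#     data = ['a', 'b', 'c']
#     vlist = ConvertFunctionsToVirtualList(lambda: len(data), data.__getitem__)
#     len(vlist), vlist[0], vlist[-1]   # -> (3, 'a', 'c')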
def RC4_encrypt(key, plaintext):
S = [i for i in range(256)]
j = 0
for i in range(256):
j = (j + S[i] + ord(key[i % len(key)])) % 256
S[i], S[j] = S[j], S[i]
i, j = 0, 0
retval = ""
for x in range(len(plaintext)):
i = (i + 1) % 256
j = (j + S[i]) % 256
S[i], S[j] = S[j], S[i]
t = S[(S[i] + S[j]) % 256]
retval += chr(ord(plaintext[x]) ^ t)
return retval
class PdfReadError(Exception):
pass
if __name__ == "__main__":
# test RC4
out = RC4_encrypt("Key", "Plaintext")
print repr(out)
pt = RC4_encrypt("Key", out)
print repr(pt)

355 bin/report/pyPdf/xmp.py Normal file
View File

@ -0,0 +1,355 @@
import re
import datetime
import decimal
from generic import PdfObject
from xml.dom import getDOMImplementation
from xml.dom.minidom import parseString
RDF_NAMESPACE = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
DC_NAMESPACE = "http://purl.org/dc/elements/1.1/"
XMP_NAMESPACE = "http://ns.adobe.com/xap/1.0/"
PDF_NAMESPACE = "http://ns.adobe.com/pdf/1.3/"
XMPMM_NAMESPACE = "http://ns.adobe.com/xap/1.0/mm/"
# What is the PDFX namespace, you might ask? I might ask that too. It's
# a completely undocumented namespace used to place "custom metadata"
# properties, which are arbitrary metadata properties with no semantic or
# documented meaning. Elements in the namespace are key/value-style storage,
# where the element name is the key and the content is the value. The keys
# are transformed into valid XML identifiers by substituting an invalid
# identifier character with \u2182 followed by the unicode hex ID of the
# original character. A key like "my car" is therefore "my\u21820020car".
#
# \u2182, in case you're wondering, is the unicode character
# U+2182 ROMAN NUMERAL TEN THOUSAND, a straightforward and obvious choice for
# escaping characters.
#
# Intentional users of the pdfx namespace should be shot on sight. A
# custom data schema and sensical XML elements could be used instead, as is
# suggested by Adobe's own documentation on XMP (under "Extensibility of
# Schemas").
#
# Information presented here on the /pdfx/ schema is a result of limited
# reverse engineering, and does not constitute a full specification.
PDFX_NAMESPACE = "http://ns.adobe.com/pdfx/1.3/"
iso8601 = re.compile("""
(?P<year>[0-9]{4})
(-
(?P<month>[0-9]{2})
(-
(?P<day>[0-9]+)
(T
(?P<hour>[0-9]{2}):
(?P<minute>[0-9]{2})
(:(?P<second>[0-9]{2}(.[0-9]+)?))?
(?P<tzd>Z|[-+][0-9]{2}:[0-9]{2})
)?
)?
)?
""", re.VERBOSE)
##
# An object that represents Adobe XMP metadata.
class XmpInformation(PdfObject):
def __init__(self, stream):
self.stream = stream
docRoot = parseString(self.stream.getData())
self.rdfRoot = docRoot.getElementsByTagNameNS(RDF_NAMESPACE, "RDF")[0]
self.cache = {}
def writeToStream(self, stream, encryption_key):
self.stream.writeToStream(stream, encryption_key)
def getElement(self, aboutUri, namespace, name):
for desc in self.rdfRoot.getElementsByTagNameNS(RDF_NAMESPACE, "Description"):
if desc.getAttributeNS(RDF_NAMESPACE, "about") == aboutUri:
attr = desc.getAttributeNodeNS(namespace, name)
if attr != None:
yield attr
for element in desc.getElementsByTagNameNS(namespace, name):
yield element
def getNodesInNamespace(self, aboutUri, namespace):
for desc in self.rdfRoot.getElementsByTagNameNS(RDF_NAMESPACE, "Description"):
if desc.getAttributeNS(RDF_NAMESPACE, "about") == aboutUri:
for i in range(desc.attributes.length):
attr = desc.attributes.item(i)
if attr.namespaceURI == namespace:
yield attr
for child in desc.childNodes:
if child.namespaceURI == namespace:
yield child
def _getText(self, element):
text = ""
for child in element.childNodes:
if child.nodeType == child.TEXT_NODE:
text += child.data
return text
def _converter_string(value):
return value
def _converter_date(value):
m = iso8601.match(value)
year = int(m.group("year"))
month = int(m.group("month") or "1")
day = int(m.group("day") or "1")
hour = int(m.group("hour") or "0")
minute = int(m.group("minute") or "0")
second = decimal.Decimal(m.group("second") or "0")
seconds = second.to_integral(decimal.ROUND_FLOOR)
microseconds = (second - seconds) * 1000000
tzd = m.group("tzd") or "Z"
dt = datetime.datetime(year, month, day, hour, minute, int(seconds), int(microseconds))
if tzd != "Z":
tzd_hours, tzd_minutes = [int(x) for x in tzd.split(":")]
tzd_hours *= -1
if tzd_hours < 0:
tzd_minutes *= -1
dt = dt + datetime.timedelta(hours=tzd_hours, minutes=tzd_minutes)
return dt
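# For illustration (hypothetical timestamp): a timezone offset is folded in,
# yielding a naive UTC datetime:
#
#     _converter_date("2009-01-22T19:59:10+02:00")
#     # -> datetime.datetime(2009, 1, 22, 17, 59, 10)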
_test_converter_date = staticmethod(_converter_date)
def _getter_bag(namespace, name, converter):
def get(self):
cached = self.cache.get(namespace, {}).get(name)
if cached:
return cached
retval = []
for element in self.getElement("", namespace, name):
bags = element.getElementsByTagNameNS(RDF_NAMESPACE, "Bag")
if len(bags):
for bag in bags:
for item in bag.getElementsByTagNameNS(RDF_NAMESPACE, "li"):
value = self._getText(item)
value = converter(value)
retval.append(value)
ns_cache = self.cache.setdefault(namespace, {})
ns_cache[name] = retval
return retval
return get
def _getter_seq(namespace, name, converter):
def get(self):
cached = self.cache.get(namespace, {}).get(name)
if cached:
return cached
retval = []
for element in self.getElement("", namespace, name):
seqs = element.getElementsByTagNameNS(RDF_NAMESPACE, "Seq")
if len(seqs):
for seq in seqs:
for item in seq.getElementsByTagNameNS(RDF_NAMESPACE, "li"):
value = self._getText(item)
value = converter(value)
retval.append(value)
else:
value = converter(self._getText(element))
retval.append(value)
ns_cache = self.cache.setdefault(namespace, {})
ns_cache[name] = retval
return retval
return get
def _getter_langalt(namespace, name, converter):
def get(self):
cached = self.cache.get(namespace, {}).get(name)
if cached:
return cached
retval = {}
for element in self.getElement("", namespace, name):
alts = element.getElementsByTagNameNS(RDF_NAMESPACE, "Alt")
if len(alts):
for alt in alts:
for item in alt.getElementsByTagNameNS(RDF_NAMESPACE, "li"):
value = self._getText(item)
value = converter(value)
retval[item.getAttribute("xml:lang")] = value
else:
retval["x-default"] = converter(self._getText(element))
ns_cache = self.cache.setdefault(namespace, {})
ns_cache[name] = retval
return retval
return get
def _getter_single(namespace, name, converter):
def get(self):
cached = self.cache.get(namespace, {}).get(name)
if cached:
return cached
value = None
for element in self.getElement("", namespace, name):
if element.nodeType == element.ATTRIBUTE_NODE:
value = element.nodeValue
else:
value = self._getText(element)
break
if value != None:
value = converter(value)
ns_cache = self.cache.setdefault(namespace, {})
ns_cache[name] = value
return value
return get
##
# Contributors to the resource (other than the authors). An unsorted
# array of names.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_contributor = property(_getter_bag(DC_NAMESPACE, "contributor", _converter_string))
##
# Text describing the extent or scope of the resource.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_coverage = property(_getter_single(DC_NAMESPACE, "coverage", _converter_string))
##
# A sorted array of names of the authors of the resource, listed in order
# of precedence.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_creator = property(_getter_seq(DC_NAMESPACE, "creator", _converter_string))
##
# A sorted array of dates (datetime.datetime instances) of significance to
# the resource. The dates and times are in UTC.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_date = property(_getter_seq(DC_NAMESPACE, "date", _converter_date))
##
# A language-keyed dictionary of textual descriptions of the content of the
# resource.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_description = property(_getter_langalt(DC_NAMESPACE, "description", _converter_string))
##
# The mime-type of the resource.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_format = property(_getter_single(DC_NAMESPACE, "format", _converter_string))
##
# Unique identifier of the resource.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_identifier = property(_getter_single(DC_NAMESPACE, "identifier", _converter_string))
##
# An unordered array specifying the languages used in the resource.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_language = property(_getter_bag(DC_NAMESPACE, "language", _converter_string))
##
# An unordered array of publisher names.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_publisher = property(_getter_bag(DC_NAMESPACE, "publisher", _converter_string))
##
# An unordered array of text descriptions of relationships to other
# documents.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_relation = property(_getter_bag(DC_NAMESPACE, "relation", _converter_string))
##
# A language-keyed dictionary of textual descriptions of the rights the
# user has to this resource.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_rights = property(_getter_langalt(DC_NAMESPACE, "rights", _converter_string))
##
# Unique identifier of the work from which this resource was derived.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_source = property(_getter_single(DC_NAMESPACE, "source", _converter_string))
##
# An unordered array of descriptive phrases or keywords that specify the
# topic of the content of the resource.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_subject = property(_getter_bag(DC_NAMESPACE, "subject", _converter_string))
##
# A language-keyed dictionary of the title of the resource.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_title = property(_getter_langalt(DC_NAMESPACE, "title", _converter_string))
##
# An unordered array of textual descriptions of the document type.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
dc_type = property(_getter_bag(DC_NAMESPACE, "type", _converter_string))
##
# An unformatted text string representing document keywords.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
pdf_keywords = property(_getter_single(PDF_NAMESPACE, "Keywords", _converter_string))
##
# The PDF file version, for example 1.0, 1.3.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
pdf_pdfversion = property(_getter_single(PDF_NAMESPACE, "PDFVersion", _converter_string))
##
# The name of the tool that created the PDF document.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
pdf_producer = property(_getter_single(PDF_NAMESPACE, "Producer", _converter_string))
##
# The date and time the resource was originally created. The date and
# time are returned as a UTC datetime.datetime object.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
xmp_createDate = property(_getter_single(XMP_NAMESPACE, "CreateDate", _converter_date))
##
# The date and time the resource was last modified. The date and time
# are returned as a UTC datetime.datetime object.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
xmp_modifyDate = property(_getter_single(XMP_NAMESPACE, "ModifyDate", _converter_date))
##
# The date and time that any metadata for this resource was last
# changed. The date and time are returned as a UTC datetime.datetime
# object.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
xmp_metadataDate = property(_getter_single(XMP_NAMESPACE, "MetadataDate", _converter_date))
##
# The name of the first known tool used to create the resource.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
xmp_creatorTool = property(_getter_single(XMP_NAMESPACE, "CreatorTool", _converter_string))
##
# The common identifier for all versions and renditions of this resource.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
xmpmm_documentId = property(_getter_single(XMPMM_NAMESPACE, "DocumentID", _converter_string))
##
# An identifier for a specific incarnation of a document, updated each
# time a file is saved.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
xmpmm_instanceId = property(_getter_single(XMPMM_NAMESPACE, "InstanceID", _converter_string))
def custom_properties(self):
if not hasattr(self, "_custom_properties"):
self._custom_properties = {}
for node in self.getNodesInNamespace("", PDFX_NAMESPACE):
key = node.localName
while True:
# see documentation about PDFX_NAMESPACE earlier in file
idx = key.find(u"\u2182")
if idx == -1:
break
key = key[:idx] + unichr(int(key[idx+1:idx+5], base=16)) + key[idx+5:]
if node.nodeType == node.ATTRIBUTE_NODE:
value = node.nodeValue
else:
value = self._getText(node)
self._custom_properties[key] = value
return self._custom_properties
##
# Retrieves custom metadata properties defined in the undocumented pdfx
# metadata schema.
# <p>Stability: Added in v1.12, will exist for all future v1.x releases.
# @return Returns a dictionary of key/value items for custom metadata
# properties.
custom_properties = property(custom_properties)

View File

@ -48,7 +48,7 @@ import reportlab
import re
from reportlab.pdfgen import canvas
from reportlab import platypus
import cStringIO
import utils
import color
import os
@ -351,26 +351,28 @@ class _rml_canvas(object):
import urllib
from reportlab.lib.utils import ImageReader
s = StringIO()
# s = StringIO()
if not node.hasAttribute('file'):
if node.hasAttribute('name'):
image_data = self.images[node.getAttribute('name')]
s.write(image_data)
s = cStringIO.StringIO(image_data)
else:
import base64
image_data = base64.decodestring(node.firstChild.nodeValue)
if not image_data: return False
s.write(image_data)
s = cStringIO.StringIO(image_data)
# s.write(image_data)
else:
if node.getAttribute('file') in self.images:
s.write(self.images[node.getAttribute('file')])
s = cStringIO.StringIO(self.images[node.getAttribute('file')])
# s.write(self.images[node.getAttribute('file')])
else:
try:
u = urllib.urlopen(str(node.getAttribute('file')))
except:
u = file(os.path.join(self.path,str(node.getAttribute('file'))), 'rb')
s.write(u.read())
s = cStringIO.StringIO(u.read())
img = ImageReader(s)
(sx,sy) = img.getSize()

View File

@ -155,27 +155,28 @@ class _format(object):
if os.name == 'nt':
locale.setlocale(locale.LC_ALL, _LOCALE2WIN32.get(lang, lang) + '.' + encoding)
else:
locale.setlocale(locale.LC_ALL, lang + '.' + encoding)
locale.setlocale(locale.LC_ALL, str(lang + '.' + encoding))
except Exception:
netsvc.Logger().notifyChannel('report', netsvc.LOG_WARNING,
'report %s: unable to set locale "%s"' % (self.name,
self.object._context.get('lang', 'en_US') or 'en_US'))
class _float_format(float, _format):
def __str__(self):
if not self.object._context:
return self.name
return locale.format('%f', self.name, True)
digit = 2
if hasattr(self._field, 'digits') and self._field.digits:
digit = self._field.digits[1]
return locale.format('%.' + str(digit) + 'f', self.name, True)
class _int_format(int, _format):
def __str__(self):
if not self.object._context:
return self.name
return locale.format('%d', self.name, True)
class _date_format(str, _format):
def __str__(self):
if not self.object._context:
@ -188,7 +189,8 @@ class _date_format(str, _format):
datedata)
except :
pass
return ''
return ''
_fields_process = {
'float': _float_format,
@ -243,7 +245,7 @@ class rml_parse(object):
self.cr = cr
self.uid = uid
self.pool = pooler.get_pool(cr.dbname)
user = self.pool.get('res.users').browse(cr, uid, uid)
user = self.pool.get('res.users').browse(cr, uid, uid, fields_process=_fields_process)
self.localcontext = {
'user': user,
'company': user.company_id,
@ -331,34 +333,30 @@ class rml_parse(object):
else:
obj._cache[table][id] = {'id': id}
def formatLang(self, value, digits=2, date=False,date_time=False, grouping=True, monetary=False, currency=None):
if not value:
return ''
def formatLang(self, value, digits=2, date=False,date_time=False, grouping=True, monetary=False, currency=None):
if isinstance(value, (str, unicode)) and not value:
return ''
pool_lang=self.pool.get('res.lang')
lang = self.localcontext.get('lang', 'en_US') or 'en_US'
lang_obj = pool_lang.browse(self.cr,self.uid,pool_lang.search(self.cr,self.uid,[('code','=',lang)])[0])
if date or date_time:
date_format = lang_obj.date_format
if date_time:
date_format = lang_obj.date_format + " " + lang_obj.time_format
if not isinstance(value, time.struct_time):
# assume string, parse it
if len(str(value)) == 10:
# length of date like 2001-01-01 is ten
# assume format '%Y-%m-%d'
date = mx.DateTime.strptime(str(value),DT_FORMAT)
date = mx.DateTime.strptime(value,DT_FORMAT)
else:
# assume format '%Y-%m-%d %H:%M:%S'
value = str(value)[:19]
date = mx.DateTime.strptime(str(value),DHM_FORMAT)
else:
date = mx.DateTime.DateTime(*(value.timetuple()[:6]))
return date.strftime(date_format)
return lang_obj.format('%.' + str(digits) + 'f', value, grouping=grouping, monetary=monetary)
# def formatLang(self, value, digit=2, date=False):
@ -582,41 +580,90 @@ class report_sxw(report_rml):
def getObjects(self, cr, uid, ids, context):
table_obj = pooler.get_pool(cr.dbname).get(self.table)
return table_obj.browse(cr, uid, ids, list_class=browse_record_list, context=context)
return table_obj.browse(cr, uid, ids, list_class=browse_record_list, context=context,
fields_process=_fields_process)
def create(self, cr, uid, ids, data, context=None):
logo = None
if not context:
context={}
context = context.copy()
pool = pooler.get_pool(cr.dbname)
ir_actions_report_xml_obj = pool.get('ir.actions.report.xml')
report_xml_ids = ir_actions_report_xml_obj.search(cr, uid,
ir_obj = pool.get('ir.actions.report.xml')
report_xml_ids = ir_obj.search(cr, uid,
[('report_name', '=', self.name[7:])], context=context)
report_type = 'pdf'
report_xml = None
title=''
attach = False
if report_xml_ids:
report_xml = ir_actions_report_xml_obj.browse(cr, uid, report_xml_ids[0],
report_xml = ir_obj.browse(cr, uid, report_xml_ids[0],
context=context)
title = report_xml.name
attach = report_xml.attachment
rml = report_xml.report_rml_content
report_type = report_xml.report_type
else:
ir_menu_report_obj = pool.get('ir.ui.menu')
report_menu_ids = ir_menu_report_obj.search(cr, uid,
[('id', 'in', ids)], context=context)
title = ''
if report_menu_ids:
report_name = ir_menu_report_obj.browse(cr, uid, report_menu_ids[0],
context=context)
title = report_name.name
rml = tools.file_open(self.tmpl, subdir=None).read()
report_type= data.get('report_type', report_type)
report_type= data.get('report_type', 'pdf')
class a(object):
def __init__(self, *args, **argv):
for key,arg in argv.items():
setattr(self, key, arg)
report_xml = a(title=title, report_type=report_type, report_rml_content=rml, name=title, attachment=False, header=self.header)
attach = False
if report_type in ['sxw','odt'] and report_xml:
if attach:
objs = self.getObjects(cr, uid, ids, context)
results = []
for obj in objs:
aname = eval(attach, {'object':obj, 'time':time})
result = False
if report_xml.attachment_use and aname and context.get('attachment_use', True):
aids = pool.get('ir.attachment').search(cr, uid, [('datas_fname','=',aname+'.pdf'),('res_model','=',self.table),('res_id','=',obj.id)])
if aids:
d = base64.decodestring(pool.get('ir.attachment').browse(cr, uid, aids[0]).datas)
results.append((d,'pdf'))
continue
result = self.create_single(cr, uid, [obj.id], data, report_xml, context)
if aname:
name = aname+'.'+result[1]
pool.get('ir.attachment').create(cr, uid, {
'name': aname,
'datas': base64.encodestring(result[0]),
'datas_fname': name,
'res_model': self.table,
'res_id': obj.id,
}, context=context
)
cr.commit()
results.append(result)
if results[0][1]=='pdf':
from pyPdf import PdfFileWriter, PdfFileReader
import cStringIO
output = PdfFileWriter()
for r in results:
reader = PdfFileReader(cStringIO.StringIO(r[0]))
for page in range(reader.getNumPages()):
output.addPage(reader.getPage(page))
s = cStringIO.StringIO()
output.write(s)
return s.getvalue(), results[0][1]
return self.create_single(cr, uid, ids, data, report_xml, context)
def create_single(self, cr, uid, ids, data, report_xml, context={}):
logo = None
context = context.copy()
pool = pooler.get_pool(cr.dbname)
want_header = self.header
title = report_xml.name
attach = report_xml.attachment
report_type = report_xml.report_type
want_header = report_xml.header
if report_type in ['sxw','odt']:
context['parents'] = sxw_parents
sxw_io = StringIO.StringIO(report_xml.report_sxw_content)
sxw_z = zipfile.ZipFile(sxw_io, mode='r')
@ -643,8 +690,6 @@ class report_sxw(report_rml):
pe.appendChild(cnd)
pp.removeChild(de)
# Add Information : Resource ID and Model
rml_dom_meta = xml.dom.minidom.parseString(meta)
node = rml_dom_meta.documentElement
@ -657,14 +702,14 @@ class report_sxw(report_rml):
pe.childNodes[0].data=data['model']
meta = rml_dom_meta.documentElement.toxml('utf-8')
rml2 = rml_parser._parse(rml_dom, objs, data, header=self.header)
rml2 = rml_parser._parse(rml_dom, objs, data, header=want_header)
sxw_z = zipfile.ZipFile(sxw_io, mode='a')
sxw_z.writestr('content.xml', "<?xml version='1.0' encoding='UTF-8'?>" + \
rml2)
sxw_z.writestr('meta.xml', "<?xml version='1.0' encoding='UTF-8'?>" + \
meta)
if self.header:
if want_header:
#Add corporate header/footer
if report_type=='odt':
rml = tools.file_open('custom/corporate_odt_header.xml').read()
@ -676,13 +721,14 @@ class report_sxw(report_rml):
objs = self.getObjects(cr, uid, ids, context)
rml_parser.preprocess(objs, data, ids)
rml_dom = xml.dom.minidom.parseString(rml)
rml2 = rml_parser._parse(rml_dom, objs, data, header=self.header)
rml2 = rml_parser._parse(rml_dom, objs, data, header=want_header)
sxw_z.writestr('styles.xml',"<?xml version='1.0' encoding='UTF-8'?>" + \
rml2)
sxw_z.close()
rml2 = sxw_io.getvalue()
sxw_io.close()
else:
rml = report_xml.report_rml_content
context['parents'] = rml_parents
rml_parser = self.parser(cr, uid, self.name2, context)
rml_parser.parents = rml_parents
@ -690,27 +736,12 @@ class report_sxw(report_rml):
objs = self.getObjects(cr, uid, ids, context)
rml_parser.preprocess(objs, data, ids)
rml_dom = xml.dom.minidom.parseString(rml)
rml2 = rml_parser._parse(rml_dom, objs, data, header=self.header)
rml2 = rml_parser._parse(rml_dom, objs, data, header=want_header)
if rml_parser.logo:
logo = base64.decodestring(rml_parser.logo)
create_doc = self.generators[report_type]
pdf = create_doc(rml2, logo,title)
pdf = create_doc(rml2, logo, title.encode('utf8'))
if attach:
# TODO: save multiple print with symbolic links in attach
pool.get('ir.attachment').create(cr, uid, {
'name': (title or _('print'))+':'+time.strftime('%Y-%m-%d %H:%M:%S'),
'datas': base64.encodestring(pdf),
'datas_fname': attach+time.strftime('%Y-%m-%d')+'.'+report_type,
'res_model': self.table,
'res_id': ids[0]
}, context=context
)
cr.commit()
return (pdf, report_type)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

View File

@ -72,7 +72,7 @@ class db(netsvc.Service):
cr = db.cursor()
try:
cr.autocommit(True)
cr.execute('CREATE DATABASE ' + db_name + ' ENCODING \'unicode\'')
cr.execute('CREATE DATABASE "%s" ENCODING \'unicode\'' % db_name)
finally:
cr.close()
sql_db.close_db('template1')
@ -153,7 +153,7 @@ class db(netsvc.Service):
cr.autocommit(True)
try:
try:
cr.execute('DROP DATABASE ' + db_name)
cr.execute('DROP DATABASE "%s"' % db_name)
except Exception, e:
logger.notifyChannel("web-services", netsvc.LOG_ERROR,
'DROP DB: %s failed:\n%s' % (db_name, e))
@ -204,7 +204,7 @@ class db(netsvc.Service):
cr = db.cursor()
cr.autocommit(True)
try:
cr.execute('CREATE DATABASE ' + db_name + ' ENCODING \'unicode\'')
cr.execute('CREATE DATABASE "%s" ENCODING \'unicode\'' % db_name)
finally:
cr.close()
sql_db.close_db('template1')
@ -249,7 +249,6 @@ class db(netsvc.Service):
cr = db.cursor()
try:
try:
cr = db.cursor()
db_user = tools.config["db_user"]
if not db_user and os.name == 'posix':
import pwd
@ -259,9 +258,9 @@ class db(netsvc.Service):
res = cr.fetchone()
db_user = res and str(res[0])
if db_user:
cr.execute("select decode(datname, 'escape') from pg_database where datdba=(select usesysid from pg_user where usename=%s) and datname not in ('template0', 'template1', 'postgres')", (db_user,))
cr.execute("select decode(datname, 'escape') from pg_database where datdba=(select usesysid from pg_user where usename=%s) and datname not in ('template0', 'template1', 'postgres') order by datname", (db_user,))
else:
cr.execute("select decode(datname, 'escape') from pg_database where datname not in('template0', 'template1','postgres')")
cr.execute("select decode(datname, 'escape') from pg_database where datname not in('template0', 'template1','postgres') order by datname")
res = [str(name) for (name,) in cr.fetchall()]
except:
res = []
@ -402,24 +401,54 @@ GNU Public Licence.
zips = rc.retrieve_updates(rc.id)
from shutil import rmtree
from shutil import rmtree, copytree, copy
backup_directory = os.path.join(tools.config['root_path'], 'backup', time.strftime('%Y-%m-%d-%H-%M'))
if zips and not os.path.isdir(backup_directory):
l.notifyChannel('migration', netsvc.LOG_INFO, 'create a new backup directory to \
store the old modules: %s' % (backup_directory,))
os.makedirs(backup_directory)
for module in zips:
l.notifyChannel('migration', netsvc.LOG_INFO, 'upgrade module %s' % (module,))
mp = addons.get_module_path(module)
if mp:
if os.path.isdir(mp):
copytree(mp, os.path.join(backup_directory, module))
if os.path.islink(mp):
os.unlink(mp)
else:
rmtree(mp)
else:
copy(mp + '.zip', backup_directory)
os.unlink(mp + '.zip')
mp = os.path.join(tools.config['addons_path'], module + '.zip')
try:
try:
base64_decoded = base64.decodestring(zips[module])
except:
l.notifyChannel('migration', netsvc.LOG_ERROR, 'unable to read the module %s' % (module,))
raise
zip = open(mp, 'w')
zip.write(base64.decodestring(zips[module]))
zip.close()
zip_contents = cStringIO.StringIO(base64_decoded)
zip_contents.seek(0)
try:
try:
tools.extract_zip_file(zip_contents, tools.config['addons_path'] )
except:
l.notifyChannel('migration', netsvc.LOG_ERROR, 'unable to extract the module %s' % (module, ))
rmtree(module)
raise
finally:
zip_contents.close()
except:
l.notifyChannel('migration', netsvc.LOG_ERROR, 'restore the previous version of the module %s' % (module, ))
nmp = os.path.join(backup_directory, module)
if os.path.isdir(nmp):
copytree(nmp, tools.config['addons_path'])
else:
copy(nmp+'.zip', tools.config['addons_path'])
raise
return True
except tm.RemoteContractException, e:
@ -518,6 +547,13 @@ wizard()
# Report state:
# False -> True
#
class ExceptionWithTraceback(Exception):
def __init__(self, msg, tb):
self.message = msg
self.traceback = tb
self.args = (msg, tb)
class report_spool(netsvc.Service):
def __init__(self, name='report'):
netsvc.Service.__init__(self, name)
@ -553,12 +589,12 @@ class report_spool(netsvc.Service):
except Exception, exception:
import traceback
import sys
tb_s = reduce(lambda x, y: x+y, traceback.format_exception(
sys.exc_type, sys.exc_value, sys.exc_traceback))
tb = sys.exc_info()
tb_s = "".join(traceback.format_exception(*tb))
logger = netsvc.Logger()
logger.notifyChannel('web-services', netsvc.LOG_ERROR,
'Exception: %s\n%s' % (str(exception), tb_s))
self._reports[id]['exception'] = exception
self._reports[id]['exception'] = ExceptionWithTraceback(tools.exception_to_unicode(exception), tb)
self._reports[id]['state'] = True
cr.close()
return True

View File

@ -84,21 +84,23 @@ class Cursor(object):
self._obj = self._cnx.cursor(cursor_factory=psycopg1cursor)
self.autocommit(False)
self.dbname = pool.dbname
from inspect import stack
self.__caller = tuple(stack()[2][1:3])
if tools.config['log_level'] in (netsvc.LOG_DEBUG, netsvc.LOG_DEBUG_RPC):
from inspect import stack
self.__caller = tuple(stack()[2][1:3])
def __del__(self):
if hasattr(self, '_obj'):
# Oops. 'self' has not been closed explicitly.
# The cursor will be deleted by the garbage collector,
# but the database connection is not put back into the connection
# pool, preventing some operation on the database like dropping it.
# This can also lead to a server overload.
msg = "Cursor not closed explicitly\n" \
"Cursor was created at %s:%s" % self.__caller
if tools.config['log_level'] in (netsvc.LOG_DEBUG, netsvc.LOG_DEBUG_RPC):
# Oops. 'self' has not been closed explicitly.
# The cursor will be deleted by the garbage collector,
# but the database connection is not put back into the connection
# pool, preventing some operation on the database like dropping it.
# This can also lead to a server overload.
msg = "Cursor not closed explicitly\n" \
"Cursor was created at %s:%s" % self.__caller
log(msg, netsvc.LOG_WARNING)
log(msg, netsvc.LOG_WARNING)
self.close()
@check

View File

@ -88,5 +88,5 @@ class mysocket:
else:
return res[0]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

View File

@ -1,7 +1,7 @@
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
@ -60,6 +60,7 @@ class configmanager(object):
'language': None,
'pg_path': None,
'admin_passwd': 'admin',
'csv_internal_sep': ',',
'addons_path': None,
'root_path': None,
'debug_mode': False,
@ -76,22 +77,22 @@ class configmanager(object):
'syslog' : False,
'log_level': logging.INFO,
'assert_exit_level': logging.WARNING, # level above which a failed assert will stop the server
'cache_timeout': 100000,
'cache_timeout': 100000,
}
hasSSL = check_ssl()
loglevels = dict([(getattr(netsvc, 'LOG_%s' % x), getattr(logging, x))
for x in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'DEBUG_RPC', 'NOTSET')])
for x in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'DEBUG_RPC', 'NOTSET')])
version = "%s %s" % (release.description, release.version)
parser = optparse.OptionParser(version=version)
parser.add_option("-c", "--config", dest="config", help="specify alternate config file")
parser.add_option("-s", "--save", action="store_true", dest="save", default=False,
parser.add_option("-s", "--save", action="store_true", dest="save", default=False,
help="save configuration to ~/.openerp_serverrc")
parser.add_option("--pidfile", dest="pidfile", help="file where the server pid will be stored")
parser.add_option("-n", "--interface", dest="interface", help="specify the TCP IP address")
parser.add_option("-p", "--port", dest="port", help="specify the TCP port", type="int")
parser.add_option("--net_interface", dest="netinterface", help="specify the TCP IP address for netrpc")
@ -99,37 +100,37 @@ class configmanager(object):
parser.add_option("--no-netrpc", dest="netrpc", action="store_false", default=True, help="disable netrpc")
parser.add_option("--no-xmlrpc", dest="xmlrpc", action="store_false", default=True, help="disable xmlrpc")
parser.add_option("-i", "--init", dest="init", help="init a module (use \"all\" for all modules)")
parser.add_option("--without-demo", dest="without_demo",
parser.add_option("--without-demo", dest="without_demo",
help="load demo data for a module (use \"all\" for all modules)", default=False)
parser.add_option("-u", "--update", dest="update",
parser.add_option("-u", "--update", dest="update",
help="update a module (use \"all\" for all modules)")
parser.add_option("--cache-timeout", dest="cache_timeout",
parser.add_option("--cache-timeout", dest="cache_timeout",
help="set the timeout for the cache system", default=100000, type="int")
# stops the server from launching after initialization
parser.add_option("--stop-after-init", action="store_true", dest="stop_after_init", default=False,
parser.add_option("--stop-after-init", action="store_true", dest="stop_after_init", default=False,
help="stop the server after it initializes")
parser.add_option('--debug', dest='debug_mode', action='store_true', default=False, help='enable debug mode')
parser.add_option("--assert-exit-level", dest='assert_exit_level', type="choice", choices=loglevels.keys(),
parser.add_option("--assert-exit-level", dest='assert_exit_level', type="choice", choices=loglevels.keys(),
help="specify the level at which a failed assertion will stop the server. Accepted values: %s" % (loglevels.keys(),))
if hasSSL:
group = optparse.OptionGroup(parser, "SSL Configuration")
group.add_option("-S", "--secure", dest="secure", action="store_true",
group.add_option("-S", "--secure", dest="secure", action="store_true",
help="launch server over https instead of http", default=False)
group.add_option("--cert-file", dest="secure_cert_file",
default="server.cert",
default="server.cert",
help="specify the certificate file for the SSL connection")
group.add_option("--pkey-file", dest="secure_pkey_file",
group.add_option("--pkey-file", dest="secure_pkey_file",
default="server.pkey",
help="specify the private key file for the SSL connection")
parser.add_option_group(group)
# Logging Group
group = optparse.OptionGroup(parser, "Logging Configuration")
group.add_option("--logfile", dest="logfile", help="file where the server log will be stored")
group.add_option("--syslog", action="store_true", dest="syslog",
default=False, help="Send the log to the syslog server")
group.add_option('--log-level', dest='log_level', type='choice', choices=loglevels.keys(),
group.add_option('--log-level', dest='log_level', type='choice', choices=loglevels.keys(),
help='specify the level of the logging. Accepted values: ' + str(loglevels.keys()))
parser.add_option_group(group)
@ -144,17 +145,17 @@ class configmanager(object):
group.add_option('--smtp-password', dest='smtp_password', default='', help='specify the SMTP password for sending email')
group.add_option('--price_accuracy', dest='price_accuracy', default='2', help='specify the price accuracy')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Database related options")
group.add_option("-d", "--database", dest="db_name", help="specify the database name")
group.add_option("-r", "--db_user", dest="db_user", help="specify the database user name")
group.add_option("-w", "--db_password", dest="db_password", help="specify the database password")
group.add_option("--pg_path", dest="pg_path", help="specify the pg executable path")
group.add_option("--db_host", dest="db_host", help="specify the database host")
group.add_option("--db_port", dest="db_port", help="specify the database port", type="int")
group.add_option("--db_maxconn", dest="db_maxconn", default='64',
group.add_option("-w", "--db_password", dest="db_password", help="specify the database password")
group.add_option("--pg_path", dest="pg_path", help="specify the pg executable path")
group.add_option("--db_host", dest="db_host", help="specify the database host")
group.add_option("--db_port", dest="db_port", help="specify the database port", type="int")
group.add_option("--db_maxconn", dest="db_maxconn", default='64',
help="specify the the maximum number of physical connections to posgresql")
group.add_option("-P", "--import-partial", dest="import_partial",
group.add_option("-P", "--import-partial", dest="import_partial",
help="Use this for big data importation, if it crashes you will be able to continue at the current state. Provide a filename to store intermediate importation states.", default=False)
parser.add_option_group(group)
@ -164,16 +165,16 @@ class configmanager(object):
"Option '-l' is mandatory in case of importation"
)
group.add_option('-l', "--language", dest="language",
group.add_option('-l', "--language", dest="language",
help="specify the language of the translation file. Use it with --i18n-export or --i18n-import")
group.add_option("--i18n-export", dest="translate_out",
group.add_option("--i18n-export", dest="translate_out",
help="export all sentences to be translated to a CSV file, a PO file or a TGZ archive and exit")
group.add_option("--i18n-import", dest="translate_in",
group.add_option("--i18n-import", dest="translate_in",
help="import a CSV or a PO file with translations and exit. The '-l' option is required.")
group.add_option("--modules", dest="translate_modules",
group.add_option("--modules", dest="translate_modules",
help="specify modules to export. Use in combination with --i18n-export")
group.add_option("--addons-path", dest="addons_path",
help="specify an alternative addons path.",
group.add_option("--addons-path", dest="addons_path",
help="specify an alternative addons path.",
action="callback", callback=self._check_addons_path, nargs=1, type="string")
parser.add_option_group(group)
@ -194,7 +195,7 @@ class configmanager(object):
self.rcfile = fname or opt.config or os.environ.get('OPENERP_SERVER') or rcfilepath
self.load()
# Verify that we want to log or not, if not the output will go to stdout
if self.options['logfile'] in ('None', 'False'):
@ -204,8 +205,8 @@ class configmanager(object):
self.options['pidfile'] = False
keys = ['interface', 'port', 'db_name', 'db_user', 'db_password', 'db_host',
'db_port', 'logfile', 'pidfile', 'smtp_port', 'cache_timeout',
'email_from', 'smtp_server', 'smtp_user', 'smtp_password', 'price_accuracy',
'netinterface', 'netport', 'db_maxconn', 'import_partial', 'addons_path',
'netrpc', 'xmlrpc', 'syslog', 'without_demo']
@ -217,7 +218,7 @@ class configmanager(object):
if getattr(opt, arg):
self.options[arg] = getattr(opt, arg)
keys = ['language', 'translate_out', 'translate_in', 'debug_mode',
'stop_after_init']
for arg in keys:
@ -228,7 +229,7 @@ class configmanager(object):
if opt.log_level:
self.options['log_level'] = loglevels[opt.log_level]
if not self.options['root_path'] or self.options['root_path']=='None':
self.options['root_path'] = os.path.abspath(os.path.dirname(sys.argv[0]))
if not self.options['addons_path'] or self.options['addons_path']=='None':
@ -240,7 +241,7 @@ class configmanager(object):
self.options['translate_modules'] = opt.translate_modules and map(lambda m: m.strip(), opt.translate_modules.split(',')) or ['all']
self.options['translate_modules'].sort()
if opt.pg_path:
self.options['pg_path'] = opt.pg_path
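
The hunks above implement a common precedence rule: built-in defaults are overridden by the rc file loaded in self.load(), which in turn is overridden by any flag actually passed on the command line (the `if getattr(opt, arg)` test). A minimal sketch of that layering, with invented values:

    options = {'db_port': 5432, 'logfile': False}  # built-in defaults
    rcfile_values = {'db_port': 5433}              # as parsed from the rc file
    options.update(rcfile_values)

    class Opt(object):                             # stand-in for optparse results
        db_port = 5434
        logfile = None

    opt = Opt()
    for arg in ('db_port', 'logfile'):
        if getattr(opt, arg):                      # only override when the flag was given
            options[arg] = getattr(opt, arg)

    print options['db_port']  # 5434: command line wins over rc file and defaults

One side effect of the truthiness test, visible in the original code too: a flag explicitly set to a falsy value (0, empty string) is indistinguishable from an unset one and will not override the rc file.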

View File

@ -184,7 +184,7 @@ class assertion_report(object):
def record_assertion(self, success, severity):
"""
Records the result of an assertion for the failed/success count
retrurns success
returns success
"""
if severity in self._report:
self._report[severity][success] += 1
@ -208,8 +208,17 @@ class assertion_report(object):
class xml_import(object):
def isnoupdate(self, data_node = None):
return self.noupdate or (data_node and data_node.getAttribute('noupdate').strip() not in ('', '0', 'False'))
@staticmethod
def nodeattr2bool(node, attr, default=False):
if not node.hasAttribute(attr):
return default
val = node.getAttribute(attr).strip()
if not val:
return default
return val.lower() not in ('0', 'false', 'off')
def isnoupdate(self, data_node=None):
return self.noupdate or (data_node and self.nodeattr2bool(data_node, 'noupdate', False))
def get_context(self, data_node, node, eval_dict):
data_node_context = (data_node and data_node.getAttribute('context').encode('utf8'))
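
The new nodeattr2bool helper treats a missing or empty attribute as the supplied default, and only '0'/'false'/'off' (case-insensitively) as false. A quick minidom check of those semantics; the function body below simply mirrors the staticmethod so the snippet runs without the class:

    from xml.dom.minidom import parseString

    def nodeattr2bool(node, attr, default=False):
        if not node.hasAttribute(attr):
            return default
        val = node.getAttribute(attr).strip()
        if not val:
            return default
        return val.lower() not in ('0', 'false', 'off')

    rec = parseString('<record forcecreate="off" noupdate=""/>').documentElement
    print nodeattr2bool(rec, 'forcecreate', True)  # False: 'off' is an explicit no
    print nodeattr2bool(rec, 'noupdate', False)    # False: empty falls back to the default
    print nodeattr2bool(rec, 'missing', True)      # True: absent falls back to the default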
@ -263,7 +272,7 @@ form: module.record_id""" % (xml_id,)
for dest,f in (('name','string'),('model','model'),('report_name','name')):
res[dest] = rec.getAttribute(f).encode('utf8')
assert res[dest], "Attribute %s of report is empty !" % (f,)
for field,dest in (('rml','report_rml'),('xml','report_xml'),('xsl','report_xsl'),('attachment','attachment')):
for field,dest in (('rml','report_rml'),('xml','report_xml'),('xsl','report_xsl'),('attachment','attachment'),('attachment_use','attachment_use')):
if rec.hasAttribute(field):
res[dest] = rec.getAttribute(field).encode('utf8')
if rec.hasAttribute('auto'):
@ -684,12 +693,11 @@ form: module.record_id""" % (xml_id,)
return None
else:
# if the resource didn't exist
if rec.getAttribute("forcecreate"):
# we want to create it, so we let the normal "update" behavior happen
pass
else:
# otherwise do nothing
if not self.nodeattr2bool(rec, 'forcecreate', True):
# we don't want to create it, so we skip it
return None
# else, we let the record be created
else:
# otherwise it is skipped
return None
@ -843,7 +851,7 @@ def convert_csv_import(cr, module, fname, csvcontent, idref=None, mode='init',
datas.append(map(lambda x: misc.ustr(x), line))
except:
logger = netsvc.Logger()
logger.notifyChannel("init", netsvc.LOG_ERROR, "Can not import the line: %s" % line)
logger.notifyChannel("init", netsvc.LOG_ERROR, "Cannot import the line: %s" % line)
pool.get(model).import_data(cr, uid, fields, datas, mode, module, noupdate, filename=fname_partial)
if config.get('import_partial'):
data = pickle.load(file(config.get('import_partial')))
@ -865,7 +873,7 @@ def convert_xml_import(cr, module, xmlfile, idref=None, mode='init', noupdate=Fa
relaxng.assert_(doc)
except Exception, e:
logger = netsvc.Logger()
logger.notifyChannel('init', netsvc.LOG_ERROR, 'The XML file do not fit the required schema !')
logger.notifyChannel('init', netsvc.LOG_ERROR, 'The XML file does not fit the required schema !')
logger.notifyChannel('init', netsvc.LOG_ERROR, relaxng.error_log.last_error)
raise
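
For reference, the relaxng object used here matches the lxml API; a minimal sketch of the same validate-and-log pattern, with a placeholder schema path and document:

    from lxml import etree

    relaxng = etree.RelaxNG(etree.parse('import_xml.rng'))  # placeholder schema path
    doc = etree.fromstring('<openerp><data/></openerp>')
    try:
        relaxng.assert_(doc)                 # raises AssertionError when invalid
    except AssertionError:
        print relaxng.error_log.last_error   # same diagnostic used above
        raise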

View File

@ -299,7 +299,7 @@ def reverse_enumerate(l):
#----------------------------------------------------------
# Emails
#----------------------------------------------------------
def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False, attach=None, tinycrm=False, ssl=False, debug=False,subtype='plain'):
def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False, attach=None, tinycrm=False, ssl=False, debug=False, subtype='plain'):
"""Send an email."""
import smtplib
from email.MIMEText import MIMEText
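
The subtype parameter added to email_send's signature is passed through to MIMEText. A hedged sketch of how an HTML body would be built and sent under the Python 2 email package used here (addresses and SMTP host are placeholders):

    import smtplib
    from email.MIMEText import MIMEText  # Python 2 spelling; email.mime.text in Python 3

    msg = MIMEText('<b>hello</b>', _subtype='html', _charset='utf-8')
    msg['Subject'] = 'demo'
    msg['From'] = 'from@example.com'
    msg['To'] = 'to@example.com'

    s = smtplib.SMTP('localhost')        # placeholder SMTP host
    s.sendmail('from@example.com', ['to@example.com'], msg.as_string())
    s.quit()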
@ -890,6 +890,26 @@ icons = map(lambda x: (x,x), ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_
'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
])
def extract_zip_file(zip_file, outdirectory):
import zipfile
import os
zf = zipfile.ZipFile(zip_file, 'r')
out = outdirectory
for path in zf.namelist():
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
if not path.endswith('/'):  # zip member names always use '/', even on Windows; skip directory entries
fp = open(tgt, 'wb')
fp.write(zf.read(path))
fp.close()
zf.close()
if __name__ == '__main__':
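
A usage sketch for extract_zip_file, plus one caveat worth noting: nothing above rejects member names containing '..', so a hostile archive could write outside outdirectory. The guard below is an assumption-laden hardening sketch, not part of the original code:

    import os
    import zipfile

    def safe_extract(zip_file, outdirectory):
        zf = zipfile.ZipFile(zip_file, 'r')
        out = os.path.abspath(outdirectory)
        for path in zf.namelist():
            tgt = os.path.abspath(os.path.join(out, path))
            if not tgt.startswith(out + os.sep):  # refuse '..' escapes
                raise Exception('illegal path in archive: %r' % path)
            if path.endswith('/'):                # directory entry
                if not os.path.isdir(tgt):
                    os.makedirs(tgt)
                continue
            tgtdir = os.path.dirname(tgt)
            if not os.path.isdir(tgtdir):
                os.makedirs(tgtdir)
            fp = open(tgt, 'wb')
            fp.write(zf.read(path))
            fp.close()
        zf.close()

    safe_extract('module.zip', './addons')        # placeholder arguments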

View File

@ -33,6 +33,7 @@ import mx.DateTime as mxdt
import tempfile
import tarfile
import codecs
import locale
class UNIX_LINE_TERMINATOR(csv.excel):
@ -163,7 +164,7 @@ class TinyPoFile(object):
def write_infos(self, modules):
import release
self.buffer.write("# Translation of %(project)s.\n" \
"# This file containt the translation of the following modules:\n" \
"# This file contains the translation of the following modules:\n" \
"%(modules)s" \
"#\n" \
"msgid \"\"\n" \
@ -550,8 +551,29 @@ def trans_load_data(db_name, fileobj, fileformat, lang, strict=False, lang_name=
try:
uid = 1
cr = pooler.get_db(db_name).cursor()
ids = lang_obj.search(cr, uid, [('code','=',lang)])
lc, encoding = locale.getdefaultlocale()
if not encoding:
encoding = 'UTF-8'
if encoding == 'utf':
encoding = 'UTF-8'
if encoding == 'cp1252':
encoding = '1252'
if encoding == 'iso-8859-1':
encoding = 'iso-8859-15'
if encoding == 'latin1':
encoding = 'latin9'
try:
if os.name == 'nt':
locale.setlocale(locale.LC_ALL, str(_LOCALE2WIN32.get(lang, lang) + '.' + encoding))
else:
locale.setlocale(locale.LC_ALL, str(lang + '.' + encoding))
except Exception:
netsvc.Logger().notifyChannel(' ', netsvc.LOG_WARNING,
'unable to set locale "%s"' % (lang))
if not ids:
if not lang_name:
lang_name=lang
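
The block above normalizes the codeset reported by locale.getdefaultlocale() before calling setlocale, because the raw names ('cp1252', 'latin1', ...) are not always accepted verbatim by the C library. A compressed sketch of the same idea; the alias table is illustrative, and _LOCALE2WIN32 is assumed to map ISO language codes to Windows locale names:

    import locale
    import os

    _ENC_ALIASES = {'utf': 'UTF-8', 'cp1252': '1252',
                    'iso-8859-1': 'iso-8859-15', 'latin1': 'latin9'}

    def try_set_locale(lang, win32_map=None):
        lc, encoding = locale.getdefaultlocale()
        encoding = _ENC_ALIASES.get(encoding, encoding) or 'UTF-8'
        name = (win32_map or {}).get(lang, lang) if os.name == 'nt' else lang
        try:
            locale.setlocale(locale.LC_ALL, str(name + '.' + encoding))
        except locale.Error:
            print 'unable to set locale "%s"' % lang  # mirror the warning above

Note also that locale.nl_langinfo, used below to derive date_format, time_format, and decimal_point, is only available on Unix; on Windows those calls would fail.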
@ -561,9 +583,21 @@ def trans_load_data(db_name, fileobj, fileformat, lang, strict=False, lang_name=
'code': lang,
'name': lang_name,
'translatable': 1,
'date_format' : str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y')),
'time_format' : str(locale.nl_langinfo(locale.T_FMT)),
'grouping' : [],
'decimal_point' : str(locale.nl_langinfo(locale.RADIXCHAR)),
'thousands_sep' : str(locale.nl_langinfo(locale.THOUSEP))
})
else:
lang_obj.write(cr, uid, ids, {'translatable':1})
lang_obj.write(cr, uid, ids, {'translatable':1,
'date_format' : str(locale.nl_langinfo(locale.D_FMT).replace('%y', '%Y')),
'time_format' : str(locale.nl_langinfo(locale.T_FMT)),
'grouping' : [],
'decimal_point' : str(locale.nl_langinfo(locale.RADIXCHAR)),
'thousands_sep' : str(locale.nl_langinfo(locale.THOUSEP))
})
locale.resetlocale(locale.LC_ALL)
lang_ids = lang_obj.search(cr, uid, [])
langs = lang_obj.read(cr, uid, lang_ids)
ls = map(lambda x: (x['code'],x['name']), langs)