# source: wcs/wcs/qommon/storage.py

# w.c.s. - web application for online forms
# Copyright (C) 2005-2010 Entr'ouvert
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import _thread
import builtins
import copy
import copyreg
import errno
import operator
import os
import os.path
import pickle
import shutil
import sys
import tempfile
import time
from django.utils.decorators import classonlymethod
from django.utils.encoding import force_bytes
from quixote import get_publisher
from . import PICKLE_KWARGS, force_str
from .vendor import locket
# add compatibility names in case those were stored in pickles
# (Python 2 spelled these modules copy_reg/__builtin__; aliasing them lets
# legacy pickle streams referencing those names still resolve)
sys.modules['copy_reg'] = copyreg
sys.modules['__builtin__'] = builtins
def cache_umask():
    # os.umask() can only be read by setting a new value; set-and-restore
    # once and keep the result in a module global for atomic_write() to use
    global process_umask
    process_umask = os.umask(0)
    os.umask(process_umask)


# cache umask when loading up the module
cache_umask()
def _take(objects, limit, offset=0):
for y in objects:
if offset:
offset -= 1
continue
if limit:
limit -= 1
elif limit == 0:
break
elif limit is None:
pass
yield y
def lax_int(s):
    """Coerce *s* to an int, mapping unparsable strings to -1.

    Note: only ValueError is absorbed; int(None) etc. still raises TypeError.
    """
    try:
        value = int(s)
    except ValueError:
        value = -1
    return value
def fix_key(k):
    """Make *k* usable as a filesystem name ('/' would escape the directory).

    Falsy keys (None, '', 0) are returned unchanged, untouched by str().
    """
    return str(k).replace('/', '-') if k else k
def atomic_write(path, content, async_op=False):
    """Atomically replace the file at *path* with *content*.

    The data is written to a uniquely named temporary file in the same
    directory, flushed and fsync'ed, then renamed over the destination, so
    concurrent readers always see either the old or the new complete file.

    content: a bytes string, or a binary file-like object (anything with a
    .read() method), copied in 100kB chunks.
    async_op: when True the write happens in a background thread.

    Fixes: docstring said "automatically" instead of "atomically"; a failed
    write used to leave the temporary file behind — it is now removed.
    """

    def doit():
        dirname = os.path.dirname(path)
        fd, temp = tempfile.mkstemp(dir=dirname, prefix='.tmp-' + os.path.basename(path) + '-')
        try:
            # mkstemp creates the file 0600; realign with the process umask
            os.fchmod(fd, 0o666 & ~process_umask)
            f = os.fdopen(fd, 'wb')
            if hasattr(content, 'read'):
                # file pointer: stream in 100kB pieces
                def read100k():
                    return content.read(100000)

                for piece in iter(read100k, b''):
                    f.write(piece)
            else:
                f.write(content)
            f.flush()
            os.fsync(f.fileno())
            f.close()
            os.rename(temp, path)
        except Exception:
            # do not leave a stale temporary file behind on failure
            try:
                os.unlink(temp)
            except OSError:
                pass
            raise

    if async_op:
        _thread.start_new_thread(doit, ())
    else:
        doit()
def deep_bytes2str(obj, seen=None):
    """Recursively convert an object loaded by unpickle(encoding='bytes')
    into one using str, decoding bytes as UTF-8 where possible.

    Needed because encoding='utf-8' cannot be used when pickles contain
    datetime objects <https://bugs.python.org/issue22005>.  Undecodable
    byte strings are kept as-is; instances of project classes have their
    __dict__ rewritten in place (with cycle protection via *seen*).
    """
    if seen is None:
        seen = {}
    if obj is None or isinstance(obj, (int, float, str, time.struct_time, type(Ellipsis))):
        return obj
    if isinstance(obj, bytes):
        try:
            return obj.decode('utf-8')
        except UnicodeDecodeError:
            return obj  # genuine binary payload, keep untouched
    if isinstance(obj, list):
        return [deep_bytes2str(item, seen) for item in obj]
    if isinstance(obj, dict):
        return {force_str(key): deep_bytes2str(value, seen) for key, value in obj.items()}
    if id(obj) not in seen:
        seen[id(obj)] = True
        if hasattr(obj, '__class__') and obj.__class__.__module__.startswith(('wcs.', 'qommon.', 'modules.')):
            obj.__dict__ = deep_bytes2str(obj.__dict__, seen)
    return obj
def pickle_2to3_conversion(obj):
    # Convert a Python-2-era unpickled object: decode bytes keys/values in
    # its __dict__ (recursively) to str where possible.
    obj.__dict__ = deep_bytes2str(obj.__dict__)  # inplace
class Criteria:
    """Base class for selection criteria: attribute <op> value.

    Subclasses provide `op` (a two-argument comparison callable) and may
    override build_lambda(); build_lambda() returns a predicate usable by
    parse_clause()/select().
    """

    def __init__(self, attribute, value, **kwargs):
        self.attribute = attribute
        self.value = value
        # Python 3 refuses comparisons between disparate types; this means we
        # need to create a null value of the appropriate type, so None
        # attribute values can still be compared/sorted.
        self.typed_none = ''
        if isinstance(self.value, bool):
            self.typed_none = False
        elif isinstance(self.value, (int, float)):
            self.typed_none = -sys.maxsize
        elif isinstance(self.value, time.struct_time):
            self.typed_none = time.gmtime(-(10**10))  # 1653

    def build_lambda(self):
        def func(x):
            attribute = getattr(x, self.attribute, None)
            if isinstance(self.value, int):
                try:
                    attribute = int(attribute)
                except (TypeError, ValueError):
                    # bug fix: int('not-a-number') raises ValueError, which
                    # used to escape and abort the whole selection; fall back
                    # to comparing the raw value (or typed_none) instead
                    pass
            return self.op(attribute or self.typed_none, self.value)

        return func

    def __repr__(self):
        return '<%s (attribute: %r%s)>' % (
            self.__class__.__name__,
            getattr(self, 'attribute', None),
            ', value: %s' % repr(self.value) if hasattr(self, 'value') else '',
        )
class Less(Criteria):
    # matches objects whose attribute < value
    op = operator.lt
class Greater(Criteria):
    # matches objects whose attribute > value
    op = operator.gt
class Equal(Criteria):
    # matches objects whose attribute == value
    op = operator.eq
class Contains(Criteria):
    """Matches objects whose attribute is one of the items of *value*."""

    op = operator.contains

    def build_lambda(self):
        def func(x):
            # operator.contains(container, item): value is the container
            # noqa pylint: disable=too-many-function-args
            return self.op(self.value, getattr(x, self.attribute, ''))

        return func
class Intersects(Criteria):
    """Matches objects whose (list-valued) attribute shares at least one
    element with *value*."""

    def build_lambda(self):
        wanted = set(self.value)

        def func(x):
            try:
                return wanted.intersection(set(getattr(x, self.attribute, []) or []))
            except KeyError:
                # this may happen if used to check a formdata field that didn't
                # exist when the formdata was created.
                return False

        return func
class Not(Criteria):
    """Negation of another criteria."""

    def __init__(self, criteria, **kwargs):
        self.criteria = criteria

    def build_lambda(self):
        matches = self.criteria.build_lambda()
        return lambda x: not matches(x)

    def __repr__(self):
        return '<%s (%r)>' % (self.__class__.__name__, self.criteria)
class Or(Criteria):
    """Disjunction of a list of criterias (an empty list matches nothing)."""

    def __init__(self, criterias, **kwargs):
        self.criterias = criterias

    def build_lambda(self):
        matchers = [criteria.build_lambda() for criteria in self.criterias]
        # any() keeps the original left-to-right short-circuit evaluation,
        # and any([]) is False like the original seed lambda
        return lambda x: any(matches(x) for matches in matchers)

    def __repr__(self):
        return '<%s (%r)>' % (self.__class__.__name__, self.criterias)
class And(Criteria):
    """Conjunction of a list of criterias (an empty list matches everything)."""

    def __init__(self, criterias, **kwargs):
        self.criterias = criterias

    def build_lambda(self):
        matchers = [criteria.build_lambda() for criteria in self.criterias]
        # all() keeps the original left-to-right short-circuit evaluation,
        # and all([]) is True like the original seed lambda
        return lambda x: all(matches(x) for matches in matchers)

    def __repr__(self):
        return '<%s (%r)>' % (self.__class__.__name__, self.criterias)
def parse_clause(clause):
    """Turn *clause* into a single predicate callable.

    clause: either a callable (returned unchanged) or an iterable mixing
    callables and Criteria instances, combined with logical AND (an empty
    iterable yields an always-true predicate).
    """
    if callable(clause):  # already a callable
        return clause
    checks = [element if callable(element) else element.build_lambda() for element in clause]
    return lambda x: all(check(x) for check in checks)
class NothingToUpdate(Exception):
    # NOTE(review): no raise sites in this module; presumably raised by
    # storage consumers elsewhere in the codebase — confirm before relying
    pass
class StorageIndexException(Exception):
    # raised when filesystem indexes cannot be created or used
    # (see rebuild_indexes() and get_ids_with_indexed_value())
    pass
class StorableObject:
    """Base class for objects persisted as pickle files on disk.

    Concrete subclasses define `_names` (the storage directory name, see
    get_table_name()) and may declare indexed attributes.
    """

    _indexes = None  # attribute names indexed through per-value symlinks
    _hashed_indexes = None  # attribute names indexed through pickled id lists
    _filename = None  # None, unless must be saved to a specific location
    _reset_class = True  # reset loaded object class
    SLUG_DASH = '-'

    def __init__(self, id=None):
        self.id = id
def __getstate__(self):
odict = copy.copy(self.__dict__)
if '_applications' in odict:
del odict['_applications']
return odict
def __setstate__(self, ndict):
self.__dict__ = ndict
if hasattr(self, '_applications'):
delattr(self, '_applications')
    def is_readonly(self):
        # absence of the `readonly` attribute means the object is writable
        return getattr(self, 'readonly', False)
    @classmethod
    def get_table_name(cls):
        # cls._names is declared by concrete subclasses (not visible in this
        # module); it names the on-disk storage directory
        return cls._names
    @classmethod
    def get_objects_dir(cls):
        # per-publisher storage directory: <app_dir>/<table name>
        return os.path.join(get_publisher().app_dir, cls.get_table_name())
@classmethod
def keys(cls, clause=None):
if not os.path.exists(cls.get_objects_dir()):
return []
if clause:
return [x.id for x in cls.select(clause=clause)]
return [fix_key(x) for x in os.listdir(cls.get_objects_dir()) if x[0] != '.']
@classmethod
def values(cls, ignore_errors=False, ignore_migration=True):
values = [cls.get(x, ignore_errors=ignore_errors, ignore_migration=True) for x in cls.keys()]
return [x for x in values if x is not None]
    @classmethod
    def items(cls):
        """Return (id, object) pairs for all stored objects."""
        return [(x, cls.get(x)) for x in cls.keys()]
@classmethod
def count(cls, clause=None):
if clause:
return len(cls.select(clause))
return len(cls.keys())
    @classmethod
    def exists(cls, clause=None):
        """Return True when at least one object (matching *clause*) exists."""
        return bool(cls.count(clause))
@classmethod
def sort_results(cls, objects, order_by):
if not order_by:
return objects
order_by = str(order_by)
if order_by[0] == '-':
reverse = True
order_by = order_by[1:]
else:
reverse = False
# only list can be sorted
objects = list(objects)
if order_by == 'id':
key_function = lambda x: lax_int(x.id)
elif order_by == 'name':
# proper collation should be done but it's messy to get working
# on all systems so we go the cheap and almost ok way.
from .misc import simplify
key_function = lambda x: simplify(x.name)
elif order_by.endswith('_time'):
typed_none = time.gmtime(-(10**10)) # 1653
key_function = lambda x: getattr(x, order_by) or typed_none
else:
key_function = lambda x: getattr(x, order_by)
objects.sort(key=key_function)
if reverse:
objects.reverse()
return objects
@classmethod
def select(
cls,
clause=None,
order_by=None,
ignore_errors=False,
ignore_migration=False,
limit=None,
offset=None,
iterator=False,
itersize=None,
**kwargs,
):
# iterator: only for compatibility with sql select()
keys = cls.keys()
objects = (
cls.get(k, ignore_errors=ignore_errors, ignore_migration=ignore_migration, **kwargs) for k in keys
)
if ignore_errors:
objects = (x for x in objects if x is not None)
if clause:
clause_function = parse_clause(clause)
objects = (x for x in objects if clause_function(x))
objects = cls.sort_results(objects, order_by)
if limit or offset:
objects = _take(objects, limit, offset)
return list(objects)
    @classmethod
    def select_iterator(cls, **kwargs):
        # compatibility with the SQL backend; this storage still materializes
        # the full select() list before yielding
        yield from cls.select(**kwargs)
    @classmethod
    def has_key(cls, id):
        """Return True when an object with this id exists on disk."""
        filename = os.path.join(cls.get_objects_dir(), fix_key(id))
        return os.path.exists(force_bytes(filename, 'utf-8'))
    @classmethod
    def get_new_id(cls, create=False):
        """Allocate a new numeric id, returned as a string.

        The candidate is one more than the highest of the existing keys and
        the value memoized in the .max_id file.  With create=True the
        matching file is created with O_EXCL so two concurrent callers
        cannot obtain the same id (on collision, retry from scratch).
        """
        objects_dir = cls.get_objects_dir()
        try:
            with open(os.path.join(objects_dir, '.max_id')) as fd:
                max_id = int(fd.read())
        except (OSError, ValueError):
            # missing or corrupted .max_id file
            max_id = 0
        keys = cls.keys()
        if not keys:
            id = max_id + 1
        else:
            # non-numeric keys count as -1 via lax_int()
            id = max([lax_int(x) for x in keys] + [max_id]) + 1
        if id == 0:
            id = len(keys) + 1
        if create:
            object_filename = os.path.join(objects_dir, fix_key(id))
            try:
                # O_EXCL: fail if the file already exists (concurrent creation)
                fd = os.open(object_filename, os.O_CREAT | os.O_EXCL)
            except OSError:
                # id got taken in the meantime, try again
                return cls.get_new_id(create=True)
            os.close(fd)
            with open(os.path.join(objects_dir, '.max_id'), 'w') as fd:
                fd.write(str(id))
        return str(id)
@classmethod
def cached_get(cls, id, ignore_errors=False, **kwargs):
pub = get_publisher()
cached_object = pub._cached_objects[cls._names or cls._table_name].get(id)
if isinstance(cached_object, KeyError):
if ignore_errors:
return None
else:
raise KeyError(id)
elif cached_object is not None:
return cached_object
o = cls.get(id, ignore_errors=True, **kwargs)
pub._cached_objects[cls._names][id] = o if o is not None else KeyError()
if o is None and not ignore_errors:
raise KeyError(id)
return o
@classmethod
def get(cls, id, ignore_errors=False, ignore_migration=False, **kwargs):
if id is None:
if ignore_errors:
return None
else:
raise KeyError()
filename = os.path.join(cls.get_objects_dir(), fix_key(id))
return cls.get_filename(
filename, ignore_errors=ignore_errors, ignore_migration=ignore_migration, **kwargs
)
@classmethod
def get_by_slug(cls, slug, ignore_errors=True):
if cls._indexes and 'slug' in cls._indexes:
return cls.get_on_index(slug, 'slug', ignore_errors=ignore_errors)
objects = [x for x in cls.select(ignore_errors=True) if x and x.slug == slug]
if objects:
return objects[0]
if ignore_errors:
return None
raise KeyError(slug)
    def get_new_slug(self, base=None):
        """Return a slug unique among stored objects, derived from *base*
        (or self.name), appending -1, -2, ... until an unused one is found.

        A slug already owned by self (same id) counts as free.
        """
        from .misc import simplify

        new_slug = simplify(base or self.name, space=self.SLUG_DASH)[:250]
        base_new_slug = new_slug
        suffix_no = 0
        has_index = bool(self._indexes and 'slug' in self._indexes)
        if not has_index:
            # preload all slugs
            known_slugs = {x.slug: x.id for x in self.select(ignore_migration=True, ignore_errors=True) if x}
        while True:
            if has_index:
                obj = self.get_on_index(new_slug, 'slug', ignore_errors=True, ignore_migration=True)
                if obj is None or (self.id and str(obj.id) == str(self.id)):
                    break
            else:
                if new_slug not in known_slugs:
                    break
                if self.id and str(known_slugs[new_slug]) == str(self.id):
                    break
            suffix_no += 1
            new_slug = '%s%s%s' % (base_new_slug, self.SLUG_DASH, suffix_no)
        return new_slug
    def refresh_from_storage(self):
        # reload this object's state from disk, discarding in-memory changes
        obj = self.get(self.id)
        self.__dict__ = obj.__dict__
@classmethod
def get_ids(cls, ids, ignore_errors=False, order_by=None, **kwargs):
objects = []
for x in ids:
obj = cls.get(x, ignore_errors=ignore_errors)
if obj is not None:
objects.append(obj)
return cls.sort_results(objects, order_by)
    @classmethod
    def get_on_index(cls, id, index, ignore_errors=False, ignore_migration=False):
        """Load the object whose value for symlink index *index* is *id*."""
        if not cls._indexes:
            raise KeyError()
        objects_dir = cls.get_objects_dir()
        index_dir = objects_dir + '-' + index
        if not os.path.exists(index_dir):
            # index directory is missing; (re)build all indexes first
            cls.rebuild_indexes()
        filename = os.path.join(index_dir, str(fix_key(id)))
        return cls.get_filename(filename, ignore_errors=ignore_errors, ignore_migration=ignore_migration)
    @classmethod
    def get_ids_with_indexed_value(cls, index, value, auto_fallback=True):
        """Return the list of ids whose hashed index *index* equals *value*.

        When the index directory is missing it is rebuilt; if rebuilding
        fails and auto_fallback is allowed, a full scan is done instead.
        """
        objects_dir = cls.get_objects_dir()
        index_dir = os.path.join(objects_dir, '.indexes', str(index))
        index_file = os.path.join(index_dir, '%s-%s' % (index, fix_key(value)))
        if not os.path.exists(index_dir):
            if auto_fallback is False:
                raise StorageIndexException()
            try:
                cls.rebuild_indexes()
            except StorageIndexException:
                values = cls.select(ignore_errors=True)
                # NOTE(review): this fallback returns objects while the
                # normal path returns ids — confirm callers handle both
                return [x for x in values if getattr(x, index) == value]
        if not os.path.exists(index_file):
            return []
        with open(index_file, 'rb') as fd:
            return pickle.load(fd)
@classmethod
def get_with_indexed_value(cls, index, value, ignore_errors=False):
ids = cls.get_ids_with_indexed_value(str(index), str(value))
for x in ids:
obj = cls.get(x, ignore_errors=ignore_errors)
if obj is not None:
yield obj
@classmethod
def storage_load(cls, fd):
if get_publisher() and get_publisher().unpickler_class:
unpickler = get_publisher().unpickler_class
else:
unpickler = pickle.Unpickler
return unpickler(fd, **PICKLE_KWARGS).load()
    @classmethod
    def get_filename(cls, filename, ignore_errors=False, ignore_migration=False, **kwargs):
        """Load and unpickle one object from *filename*.

        Raises KeyError (or returns None with ignore_errors) when the file
        is missing or unloadable.  Unless ignore_migration, the loaded
        object may be normalized, migrated and re-stored on the way out.
        """
        fd = None
        try:
            fd = open(force_bytes(filename, 'utf-8'), 'rb')  # pylint: disable=consider-using-with
            # NOTE(review): storage_load() as defined here takes no extra
            # kwargs; presumably subclasses override it with some — confirm
            o = cls.storage_load(fd, **kwargs)
        except OSError:
            if ignore_errors:
                return None
            raise KeyError()
        except ImportError:
            # a class referenced by the pickle no longer exists
            if ignore_errors:
                return None
            raise KeyError()
        except EOFError:
            # maybe it's being written to, loop for a while to see
            current_position = fd.tell()
            for dummy in range(10):
                time.sleep(0.01)
                if current_position != os.stat(filename).st_size:
                    # the file grew; retry the whole load
                    return cls.get_filename(
                        filename, ignore_errors=ignore_errors, ignore_migration=ignore_migration
                    )
            if ignore_errors:
                return None
            raise KeyError()
        finally:
            if fd:
                fd.close()
        if cls._reset_class:
            # force the expected class; the pickle may reference an older one
            o.__class__ = cls
        if any(isinstance(k, bytes) for k in o.__dict__):
            # pickle written under Python 2: convert bytes to str in place
            pickle_2to3_conversion(o)
            o._upgrade_must_store = True
        if not ignore_migration:
            o.id = str(o.id)  # makes sure 'id' is a string
            if hasattr(o, '_upgrade_must_store'):
                delattr(o, '_upgrade_must_store')
                o.store()
            if hasattr(cls, 'migrate'):
                o.migrate()
        return o
    @classmethod
    def rebuild_indexes(cls, indexes=None):
        """Rebuild the symlink (_indexes) and pickled-id-list
        (_hashed_indexes) indexes from scratch.

        indexes: optional list restricting which indexes to rebuild;
        defaults to all declared ones.  Raises StorageIndexException when an
        index directory cannot be created.
        """
        indexes = indexes or []
        if not (cls._indexes or cls._hashed_indexes):
            return
        if not indexes:
            indexes = (cls._hashed_indexes or []) + (cls._indexes or [])
        objects_dir = cls.get_objects_dir()
        hashed_indexes = {}
        # make sure the hashed index directories exist
        for index in cls._hashed_indexes or []:
            if index not in indexes:
                continue
            index_dir = os.path.join(objects_dir, '.indexes', index)
            if not os.path.exists(index_dir):
                try:
                    os.makedirs(index_dir)
                except OSError:
                    raise StorageIndexException()
        for object in cls.values(ignore_errors=True, ignore_migration=True):
            object_filename = os.path.join(objects_dir, fix_key(object.id))
            relative_object_filename = os.path.join('..', cls.get_table_name(), fix_key(object.id))
            # symlink indexes: one link per object, named after the value
            for index in cls._indexes or []:
                if index not in indexes:
                    continue
                if not hasattr(object, index) or getattr(object, index) is None:
                    continue
                index_dir = objects_dir + '-' + index
                link_name = os.path.join(index_dir, fix_key(str(getattr(object, index))))
                try:
                    if relative_object_filename:
                        os.symlink(relative_object_filename, link_name)
                    else:
                        os.symlink(object_filename, link_name)
                except OSError as exc:
                    if exc.errno == 2:
                        # index directory missing, create it then retry below
                        os.mkdir(index_dir)
                    elif exc.errno == 17:
                        # link already exists, remove it then retry below
                        try:
                            os.unlink(link_name)
                        except OSError as exc2:
                            if exc2.errno != 2:  # no such file or directory
                                raise
                            # link got created in a different process, move
                            # along.
                            continue
                    else:
                        raise
                    if relative_object_filename:
                        os.symlink(relative_object_filename, link_name)
                    else:
                        os.symlink(object_filename, link_name)
            # hashed indexes: accumulate ids per (index, value) pair
            for index in cls._hashed_indexes or []:
                if index not in indexes:
                    continue
                if not hasattr(object, index) or getattr(object, index) is None:
                    continue
                attribute = getattr(object, index)
                if isinstance(attribute, dict):
                    # dict values are flattened (lists extended, scalars appended)
                    attribute_list = []
                    for value in attribute.values():
                        if isinstance(value, list):
                            attribute_list.extend(value)
                        else:
                            attribute_list.append(value)
                    attribute = attribute_list
                elif type(attribute) not in (tuple, list, set):
                    attribute = [attribute]
                for attr in attribute:
                    attr_value = fix_key(attr)
                    index_name = '%s-%s' % (index, attr_value)
                    if index_name not in hashed_indexes:
                        hashed_indexes[index_name] = []
                    hashed_indexes[index_name].append(str(object.id))
        # write the accumulated id lists
        for index, content in hashed_indexes.items():
            index_key = index.split('-')[0]
            if index_key not in indexes:
                continue
            index_file = os.path.join(objects_dir, '.indexes', index_key, index)
            with open(index_file, 'wb') as fd:
                pickle.dump(content, fd, protocol=2)
        # remove stale entries for values no longer present
        for index in cls._hashed_indexes or []:
            if index not in indexes:
                continue
            index_dir = os.path.join(objects_dir, '.indexes', index)
            for filename in os.listdir(index_dir):
                if filename not in hashed_indexes:
                    os.unlink(os.path.join(index_dir, filename))
def get_object_filename(self):
if self._filename:
if self._filename[0] == '/':
return self._filename
else:
return os.path.join(get_publisher().app_dir, self._filename)
else:
objects_dir = self.get_objects_dir()
return os.path.join(objects_dir, fix_key(self.id))
    @classmethod
    def storage_dumps(cls, object):
        # protocol 2 — presumably kept for compatibility with existing
        # pickle files written by older instances; confirm before bumping
        return pickle.dumps(object, protocol=2)
    def store(self, async_op=False, where=None):
        """Serialize this object to disk (atomically) and refresh indexes.

        async_op: perform the file write in a background thread.
        where: unused here — NOTE(review): presumably accepted for API
        compatibility with the SQL backend's store(); confirm.
        """
        assert not self.is_readonly()
        objects_dir = self.get_objects_dir()
        new_object = False
        if self._filename:
            if self._filename[0] == '/':
                object_filename = self._filename
                relative_object_filename = None
            else:
                object_filename = os.path.join(get_publisher().app_dir, self._filename)
                relative_object_filename = os.path.join('..', self._filename)
        else:
            if not os.path.exists(objects_dir):
                try:
                    os.mkdir(objects_dir)
                except OSError as error:
                    if error.errno != 17:  # 17 == Directory exists
                        raise
            if self.id is None:
                self.id = self.get_new_id(create=True)
                new_object = True
            object_filename = os.path.join(objects_dir, fix_key(self.id))
            relative_object_filename = os.path.join('..', self.get_table_name(), fix_key(self.id))
        previous_object_value = None
        if not new_object and (self._indexes or self._hashed_indexes):
            # load the previous value so stale index entries can be removed
            previous_object_value = self.get_filename(
                object_filename, ignore_errors=True, ignore_migration=True
            )
        s = self.storage_dumps(self)
        atomic_write(object_filename, s, async_op)
        # update last modified time
        if os.path.exists(objects_dir):
            os.utime(objects_dir, None)
        with locket.lock_file(objects_dir + '.lock.index'):
            try:
                self.update_indexes(previous_object_value, relative_object_filename)
            except Exception as e:
                # something failed, we can't keep using possibly broken indexes, so
                # we notify of the bug and remove the indexes
                get_publisher().record_error(exception=e, context='[STORAGE]', notify=True)
                self.destroy_indexes()
    @classmethod
    def destroy_indexes(cls):
        """Remove every index directory.

        Each directory is first renamed into a fresh ".trash-N" directory
        (rename is atomic, so concurrent readers never see a half-deleted
        index), then the trash directories are deleted for real.
        """
        objects_dir = cls.get_objects_dir()
        directories_to_trash = []
        directories_to_wipe = []
        for index in cls._indexes or []:
            index_dir = objects_dir + '-' + index
            directories_to_trash.append(index_dir)
        directories_to_trash.append(os.path.join(objects_dir, '.indexes'))
        for directory in directories_to_trash:
            if not os.path.exists(directory):
                continue
            i = 0
            while True:
                trashed_index_name = directory + '.trash-%s' % i
                i += 1
                try:
                    os.mkdir(trashed_index_name)
                except OSError:
                    # name already taken, try the next suffix
                    continue
                try:
                    os.rename(directory, os.path.join(trashed_index_name, 'idx'))
                except OSError:
                    # directory vanished or was renamed concurrently
                    continue
                directories_to_wipe.append(trashed_index_name)
                break
        for directory in directories_to_wipe:
            shutil.rmtree(directory)
def update_indexes(self, previous_object_value, relative_object_filename):
objects_dir = self.get_objects_dir()
rebuilt_indexes = False
for index in self._indexes or []:
if not hasattr(self, index) or getattr(self, index) is None:
continue
index_dir = objects_dir + '-' + index
link_name = os.path.join(index_dir, fix_key(str(getattr(self, index))))
if previous_object_value:
old_link_name = os.path.join(index_dir, fix_key(str(getattr(previous_object_value, index))))
if os.path.exists(old_link_name):
if old_link_name == link_name:
continue
os.unlink(old_link_name)
try:
if relative_object_filename:
os.symlink(relative_object_filename, link_name)
else:
os.symlink(self.get_object_filename(), link_name)
except OSError as exc:
if exc.errno == 2:
os.mkdir(index_dir)
if not rebuilt_indexes:
# perhaps index dir got removed; rebuild it before
# adding elements to it.
self.rebuild_indexes()
rebuilt_indexes = True
elif exc.errno == 17:
os.unlink(link_name)
else:
raise
if not rebuilt_indexes:
if relative_object_filename:
os.symlink(relative_object_filename, link_name)
else:
os.symlink(self.get_object_filename(), link_name)
for index in self._hashed_indexes or []:
index_dir = os.path.join(objects_dir, '.indexes', index)
if not os.path.exists(index_dir):
try:
os.makedirs(index_dir)
except OSError as e:
if e.errno == errno.EEXIST: # File exists
pass
old_value = []
if isinstance(getattr(self, index), dict):
new_value = []
for value in (getattr(self, index) or {}).values():
if isinstance(value, list):
new_value.extend(value)
else:
new_value.append(value)
if previous_object_value:
old_value = []
for value in (getattr(previous_object_value, index) or {}).values():
if isinstance(value, list):
new_value.extend(value)
else:
new_value.append(value)
elif type(getattr(self, index)) in (tuple, list, set):
new_value = getattr(self, index)
if previous_object_value:
old_value = getattr(previous_object_value, index)
if old_value is None:
old_value = []
else:
new_value = [getattr(self, index)]
if previous_object_value:
old_raw_value = getattr(previous_object_value, index)
if isinstance(old_raw_value, dict):
old_value = old_raw_value.values()
elif type(old_raw_value) in (tuple, list, set):
old_value = old_raw_value
else:
old_value = [old_raw_value]
for oldv in old_value:
if oldv in new_value:
continue
old_index_name = '%s-%s' % (index, fix_key(oldv))
old_index_file = os.path.join(index_dir, old_index_name)
if os.path.exists(old_index_file):
with open(old_index_file, 'rb') as fd:
ids = [str(x) for x in pickle.load(fd)]
if str(self.id) in ids:
ids.remove(str(self.id))
with open(old_index_file, 'wb') as fd:
pickle.dump(ids, fd, protocol=2)
for newv in new_value:
if newv in old_value:
continue
index_name = '%s-%s' % (index, fix_key(newv))
index_file = os.path.join(index_dir, index_name)
if os.path.exists(index_file):
with open(index_file, 'rb') as fd:
ids = [str(x) for x in pickle.load(fd)]
else:
ids = []
if str(self.id) not in ids:
ids.append(str(self.id))
with open(index_file, 'wb') as fd:
pickle.dump(ids, fd, protocol=2)
@classmethod
def volatile(cls):
o = cls()
o.id = None
return o
    @classmethod
    def remove_object(cls, id):
        """Delete the object with the given id and clean its index entries."""
        objects_dir = cls.get_objects_dir()
        if cls._indexes or cls._hashed_indexes:
            object = cls.get(id)
            # remove symlink index entries
            for index in cls._indexes or []:
                if not hasattr(object, index) or getattr(object, index) is None:
                    continue
                index_dir = objects_dir + '-' + index
                link_name = os.path.join(index_dir, fix_key(str(getattr(object, index))))
                try:
                    os.unlink(link_name)
                except OSError:
                    pass
            # remove the id from hashed index files
            index_dir = os.path.join(objects_dir, '.indexes')
            for index in cls._hashed_indexes or []:
                attribute = getattr(object, index)
                # NOTE(review): unlike rebuild_indexes()/update_indexes(),
                # dict-valued attributes are not flattened here — confirm
                # hashed-indexed attributes are never dicts at removal time
                if type(attribute) not in (tuple, list, set):
                    attribute = [attribute]
                for attr in attribute:
                    attr_value = fix_key(attr)
                    index_name = '%s-%s' % (index, attr_value)
                    index_file = os.path.join(index_dir, index, index_name)
                    if os.path.exists(index_file):
                        with open(index_file, 'rb') as fd:
                            ids = [str(x) for x in pickle.load(fd)]
                        if str(object.id) in ids:
                            ids.remove(str(object.id))
                        with open(index_file, 'wb') as fd:
                            pickle.dump(ids, fd, protocol=2)
        os.unlink(os.path.join(objects_dir, fix_key(id)))
    def remove_self(self):
        """Delete this object from storage and reset its id."""
        assert not self.is_readonly()
        self.remove_object(self.id)
        self.id = None
def get_last_modification_info(self):
if not get_publisher().snapshot_class:
return None, None
snapshots = get_publisher().snapshot_class.select_object_history(self)
if not snapshots:
return None, None
return snapshots[0].timestamp, snapshots[0].user_id
    def get_applications(self):
        # imported here to avoid a circular import at module load time
        from wcs.applications import Application

        if getattr(self, '_applications', None) is None:
            # populates self._applications as a side effect
            Application.load_for_object(self)
        return self._applications

    # `obj.applications` lazily loads and caches the related applications
    applications = property(get_applications)
    @classonlymethod
    def wipe(cls):
        """Remove all stored objects and their symlink indexes.

        Directories are first renamed into a temporary "wiping" directory
        (atomic from the readers' point of view), then deleted.
        """
        tmpdir = tempfile.mkdtemp(prefix='wiping', dir=os.path.join(get_publisher().app_dir))
        dirs_to_move = []
        objects_dir = cls.get_objects_dir()
        dirs_to_move.append(objects_dir)
        for index in cls._indexes or []:
            index_dir = objects_dir + '-' + index
            dirs_to_move.append(index_dir)
        for directory in dirs_to_move:
            if os.path.exists(directory):
                os.rename(directory, os.path.join(tmpdir, os.path.basename(directory)))
        shutil.rmtree(tmpdir)
def __repr__(self):
if hasattr(self, 'display_name'):
display_name = '%r ' % self.display_name
elif hasattr(self, 'get_display_name'):
display_name = '%r ' % self.get_display_name()
elif hasattr(self, 'name'):
display_name = '%r ' % self.name
else:
display_name = ''
return '<%s %sid:%s>' % (self.__class__.__name__, display_name, self.id)