Add project files.
venv/Lib/site-packages/django/core/cache/__init__.py (vendored, new file, 66 lines)
@@ -0,0 +1,66 @@
"""
Caching framework.

This package defines a set of cache backends that all conform to a simple API.
In a nutshell, a cache is a set of values -- which can be any object that
may be pickled -- identified by string keys. For the complete API, see
the abstract BaseCache class in django.core.cache.backends.base.

Client code should use the `cache` variable defined here to access the default
cache backend and look up non-default cache backends in the `caches` dict-like
object.

See docs/topics/cache.txt for information on the public API.
"""
from django.core import signals
from django.core.cache.backends.base import (
    BaseCache, CacheKeyWarning, InvalidCacheBackendError, InvalidCacheKey,
)
from django.utils.connection import BaseConnectionHandler, ConnectionProxy
from django.utils.module_loading import import_string

__all__ = [
    'cache', 'caches', 'DEFAULT_CACHE_ALIAS', 'InvalidCacheBackendError',
    'CacheKeyWarning', 'BaseCache', 'InvalidCacheKey',
]

DEFAULT_CACHE_ALIAS = 'default'


class CacheHandler(BaseConnectionHandler):
    settings_name = 'CACHES'
    exception_class = InvalidCacheBackendError

    def create_connection(self, alias):
        params = self.settings[alias].copy()
        backend = params.pop('BACKEND')
        location = params.pop('LOCATION', '')
        try:
            backend_cls = import_string(backend)
        except ImportError as e:
            raise InvalidCacheBackendError(
                "Could not find backend '%s': %s" % (backend, e)
            ) from e
        return backend_cls(location, params)

    def all(self, initialized_only=False):
        return [
            self[alias] for alias in self
            # If initialized_only is True, return only initialized caches.
            if not initialized_only or hasattr(self._connections, alias)
        ]


caches = CacheHandler()

cache = ConnectionProxy(caches, DEFAULT_CACHE_ALIAS)


def close_caches(**kwargs):
    # Some caches need to do a cleanup at the end of a request cycle. If not
    # implemented in a particular backend cache.close() is a no-op.
    for cache in caches.all(initialized_only=True):
        cache.close()


signals.request_finished.connect(close_caches)
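A quick usage sketch (not part of the committed file): the `caches` handler and `cache` proxy above are driven entirely by the CACHES setting. The alias and backend choice below are illustrative.

    # settings.py -- illustrative configuration
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'unique-snowflake',
        },
    }

    # application code
    from django.core.cache import cache, caches

    cache.set('greeting', 'hello', timeout=30)  # 'default' alias via ConnectionProxy
    assert cache.get('greeting') == 'hello'
    backend = caches['default']                 # dict-like lookup by alias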
venv/Lib/site-packages/django/core/cache/backends/__init__.py (vendored, new file, 0 lines)

venv/Lib/site-packages/django/core/cache/backends/base.py (vendored, new file, 385 lines)
@@ -0,0 +1,385 @@
"Base Cache class."
import time
import warnings

from asgiref.sync import sync_to_async

from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string


class InvalidCacheBackendError(ImproperlyConfigured):
    pass


class CacheKeyWarning(RuntimeWarning):
    pass


class InvalidCacheKey(ValueError):
    pass


# Stub class to ensure not passing in a `timeout` argument results in
# the default timeout
DEFAULT_TIMEOUT = object()

# Memcached does not accept keys longer than this.
MEMCACHE_MAX_KEY_LENGTH = 250


def default_key_func(key, key_prefix, version):
    """
    Default function to generate keys.

    Construct the key used by all other methods. By default, prepend
    the `key_prefix`. KEY_FUNCTION can be used to specify an alternate
    function with custom key making behavior.
    """
    return '%s:%s:%s' % (key_prefix, version, key)


def get_key_func(key_func):
    """
    Function to decide which key function to use.

    Default to ``default_key_func``.
    """
    if key_func is not None:
        if callable(key_func):
            return key_func
        else:
            return import_string(key_func)
    return default_key_func


class BaseCache:
    _missing_key = object()

    def __init__(self, params):
        timeout = params.get('timeout', params.get('TIMEOUT', 300))
        if timeout is not None:
            try:
                timeout = int(timeout)
            except (ValueError, TypeError):
                timeout = 300
        self.default_timeout = timeout

        options = params.get('OPTIONS', {})
        max_entries = params.get('max_entries', options.get('MAX_ENTRIES', 300))
        try:
            self._max_entries = int(max_entries)
        except (ValueError, TypeError):
            self._max_entries = 300

        cull_frequency = params.get('cull_frequency', options.get('CULL_FREQUENCY', 3))
        try:
            self._cull_frequency = int(cull_frequency)
        except (ValueError, TypeError):
            self._cull_frequency = 3

        self.key_prefix = params.get('KEY_PREFIX', '')
        self.version = params.get('VERSION', 1)
        self.key_func = get_key_func(params.get('KEY_FUNCTION'))

    def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
        """
        Return the timeout value usable by this backend based upon the provided
        timeout.
        """
        if timeout == DEFAULT_TIMEOUT:
            timeout = self.default_timeout
        elif timeout == 0:
            # ticket 21147 - avoid time.time() related precision issues
            timeout = -1
        return None if timeout is None else time.time() + timeout

    def make_key(self, key, version=None):
        """
        Construct the key used by all other methods. By default, use the
        key_func to generate a key (which, by default, prepends the
        `key_prefix` and `version`). A different key function can be provided
        at the time of cache construction; alternatively, you can subclass the
        cache backend to provide custom key making behavior.
        """
        if version is None:
            version = self.version

        return self.key_func(key, self.key_prefix, version)

    def validate_key(self, key):
        """
        Warn about keys that would not be portable to the memcached
        backend. This encourages (but does not force) writing backend-portable
        cache code.
        """
        for warning in memcache_key_warnings(key):
            warnings.warn(warning, CacheKeyWarning)

    def make_and_validate_key(self, key, version=None):
        """Helper to make and validate keys."""
        key = self.make_key(key, version=version)
        self.validate_key(key)
        return key

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a value in the cache if the key does not already exist. If
        timeout is given, use that timeout for the key; otherwise use the
        default cache timeout.

        Return True if the value was stored, False otherwise.
        """
        raise NotImplementedError('subclasses of BaseCache must provide an add() method')

    async def aadd(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        return await sync_to_async(self.add, thread_sensitive=True)(key, value, timeout, version)

    def get(self, key, default=None, version=None):
        """
        Fetch a given key from the cache. If the key does not exist, return
        default, which itself defaults to None.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a get() method')

    async def aget(self, key, default=None, version=None):
        return await sync_to_async(self.get, thread_sensitive=True)(key, default, version)

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a value in the cache. If timeout is given, use that timeout for the
        key; otherwise use the default cache timeout.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a set() method')

    async def aset(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        return await sync_to_async(self.set, thread_sensitive=True)(key, value, timeout, version)

    def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Update the key's expiry time using timeout. Return True if successful
        or False if the key does not exist.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a touch() method')

    async def atouch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
        return await sync_to_async(self.touch, thread_sensitive=True)(key, timeout, version)

    def delete(self, key, version=None):
        """
        Delete a key from the cache and return whether it succeeded, failing
        silently.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a delete() method')

    async def adelete(self, key, version=None):
        return await sync_to_async(self.delete, thread_sensitive=True)(key, version)

    def get_many(self, keys, version=None):
        """
        Fetch a bunch of keys from the cache. For certain backends (memcached,
        pgsql) this can be *much* faster when fetching multiple values.

        Return a dict mapping each key in keys to its value. If the given
        key is missing, it will be missing from the response dict.
        """
        d = {}
        for k in keys:
            val = self.get(k, self._missing_key, version=version)
            if val is not self._missing_key:
                d[k] = val
        return d

    async def aget_many(self, keys, version=None):
        """See get_many()."""
        d = {}
        for k in keys:
            val = await self.aget(k, self._missing_key, version=version)
            if val is not self._missing_key:
                d[k] = val
        return d

    def get_or_set(self, key, default, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Fetch a given key from the cache. If the key does not exist,
        add the key and set it to the default value. The default value can
        also be any callable. If timeout is given, use that timeout for the
        key; otherwise use the default cache timeout.

        Return the value of the key stored or retrieved.
        """
        val = self.get(key, self._missing_key, version=version)
        if val is self._missing_key:
            if callable(default):
                default = default()
            self.add(key, default, timeout=timeout, version=version)
            # Fetch the value again to avoid a race condition if another caller
            # added a value between the first get() and the add() above.
            return self.get(key, default, version=version)
        return val

    async def aget_or_set(self, key, default, timeout=DEFAULT_TIMEOUT, version=None):
        """See get_or_set()."""
        val = await self.aget(key, self._missing_key, version=version)
        if val is self._missing_key:
            if callable(default):
                default = default()
            await self.aadd(key, default, timeout=timeout, version=version)
            # Fetch the value again to avoid a race condition if another caller
            # added a value between the first aget() and the aadd() above.
            return await self.aget(key, default, version=version)
        return val

    def has_key(self, key, version=None):
        """
        Return True if the key is in the cache and has not expired.
        """
        return self.get(key, self._missing_key, version=version) is not self._missing_key

    async def ahas_key(self, key, version=None):
        return (
            await self.aget(key, self._missing_key, version=version)
            is not self._missing_key
        )

    def incr(self, key, delta=1, version=None):
        """
        Add delta to value in the cache. If the key does not exist, raise a
        ValueError exception.
        """
        value = self.get(key, self._missing_key, version=version)
        if value is self._missing_key:
            raise ValueError("Key '%s' not found" % key)
        new_value = value + delta
        self.set(key, new_value, version=version)
        return new_value

    async def aincr(self, key, delta=1, version=None):
        """See incr()."""
        value = await self.aget(key, self._missing_key, version=version)
        if value is self._missing_key:
            raise ValueError("Key '%s' not found" % key)
        new_value = value + delta
        await self.aset(key, new_value, version=version)
        return new_value

    def decr(self, key, delta=1, version=None):
        """
        Subtract delta from value in the cache. If the key does not exist, raise
        a ValueError exception.
        """
        return self.incr(key, -delta, version=version)

    async def adecr(self, key, delta=1, version=None):
        return await self.aincr(key, -delta, version=version)

    def __contains__(self, key):
        """
        Return True if the key is in the cache and has not expired.
        """
        # This is a separate method, rather than just a copy of has_key(),
        # so that it always has the same functionality as has_key(), even
        # if a subclass overrides it.
        return self.has_key(key)

    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a bunch of values in the cache at once from a dict of key/value
        pairs. For certain backends (memcached), this is much more efficient
        than calling set() multiple times.

        If timeout is given, use that timeout for the key; otherwise use the
        default cache timeout.

        On backends that support it, return a list of keys that failed
        insertion, or an empty list if all keys were inserted successfully.
        """
        for key, value in data.items():
            self.set(key, value, timeout=timeout, version=version)
        return []

    async def aset_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        for key, value in data.items():
            await self.aset(key, value, timeout=timeout, version=version)
        return []

    def delete_many(self, keys, version=None):
        """
        Delete a bunch of values in the cache at once. For certain backends
        (memcached), this is much more efficient than calling delete() multiple
        times.
        """
        for key in keys:
            self.delete(key, version=version)

    async def adelete_many(self, keys, version=None):
        for key in keys:
            await self.adelete(key, version=version)

    def clear(self):
        """Remove *all* values from the cache at once."""
        raise NotImplementedError('subclasses of BaseCache must provide a clear() method')

    async def aclear(self):
        return await sync_to_async(self.clear, thread_sensitive=True)()

    def incr_version(self, key, delta=1, version=None):
        """
        Add delta to the cache version for the supplied key. Return the new
        version.
        """
        if version is None:
            version = self.version

        value = self.get(key, self._missing_key, version=version)
        if value is self._missing_key:
            raise ValueError("Key '%s' not found" % key)

        self.set(key, value, version=version + delta)
        self.delete(key, version=version)
        return version + delta

    async def aincr_version(self, key, delta=1, version=None):
        """See incr_version()."""
        if version is None:
            version = self.version

        value = await self.aget(key, self._missing_key, version=version)
        if value is self._missing_key:
            raise ValueError("Key '%s' not found" % key)

        await self.aset(key, value, version=version + delta)
        await self.adelete(key, version=version)
        return version + delta

    def decr_version(self, key, delta=1, version=None):
        """
        Subtract delta from the cache version for the supplied key. Return the
        new version.
        """
        return self.incr_version(key, -delta, version)

    async def adecr_version(self, key, delta=1, version=None):
        return await self.aincr_version(key, -delta, version)

    def close(self, **kwargs):
        """Close the cache connection"""
        pass

    async def aclose(self, **kwargs):
        pass


def memcache_key_warnings(key):
    if len(key) > MEMCACHE_MAX_KEY_LENGTH:
        yield (
            'Cache key will cause errors if used with memcached: %r '
            '(longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH)
        )
    for char in key:
        if ord(char) < 33 or ord(char) == 127:
            yield (
                'Cache key contains characters that will cause errors if '
                'used with memcached: %r' % key
            )
            break
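Because only add(), get(), set(), touch(), delete(), and clear() are abstract, a concrete backend just fills in those primitives; get_or_set(), incr(), __contains__(), and the async a*() wrappers are inherited. A minimal illustrative subclass (a toy for demonstration, not one of Django's shipped backends; it ignores timeouts entirely):

    from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache

    class SimpleDictCache(BaseCache):
        # Toy in-process backend. CacheHandler.create_connection() calls
        # backend_cls(location, params), hence the two-argument __init__.
        def __init__(self, location, params):
            super().__init__(params)
            self._store = {}

        def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
            key = self.make_and_validate_key(key, version=version)
            if key in self._store:
                return False
            self._store[key] = value
            return True

        def get(self, key, default=None, version=None):
            key = self.make_and_validate_key(key, version=version)
            return self._store.get(key, default)

        def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
            key = self.make_and_validate_key(key, version=version)
            self._store[key] = value

        def delete(self, key, version=None):
            key = self.make_and_validate_key(key, version=version)
            return self._store.pop(key, self._missing_key) is not self._missing_key

        def clear(self):
            self._store.clear()

With just these primitives, inherited helpers such as get_or_set() and incr() work unchanged, since they are written purely in terms of get(), add(), and set().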
venv/Lib/site-packages/django/core/cache/backends/db.py (vendored, new file, 267 lines)
@@ -0,0 +1,267 @@
"Database cache backend."
import base64
import pickle
from datetime import datetime

from django.conf import settings
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.db import DatabaseError, connections, models, router, transaction
from django.utils import timezone


class Options:
    """A class that will quack like a Django model _meta class.

    This allows cache operations to be controlled by the router
    """
    def __init__(self, table):
        self.db_table = table
        self.app_label = 'django_cache'
        self.model_name = 'cacheentry'
        self.verbose_name = 'cache entry'
        self.verbose_name_plural = 'cache entries'
        self.object_name = 'CacheEntry'
        self.abstract = False
        self.managed = True
        self.proxy = False
        self.swapped = False


class BaseDatabaseCache(BaseCache):
    def __init__(self, table, params):
        super().__init__(params)
        self._table = table

        class CacheEntry:
            _meta = Options(table)
        self.cache_model_class = CacheEntry


class DatabaseCache(BaseDatabaseCache):

    # This class uses cursors provided by the database connection. This means
    # it reads expiration values as aware or naive datetimes, depending on the
    # value of USE_TZ and whether the database supports time zones. The ORM's
    # conversion and adaptation infrastructure is then used to avoid comparing
    # aware and naive datetimes accidentally.

    pickle_protocol = pickle.HIGHEST_PROTOCOL

    def get(self, key, default=None, version=None):
        return self.get_many([key], version).get(key, default)

    def get_many(self, keys, version=None):
        if not keys:
            return {}

        key_map = {self.make_and_validate_key(key, version=version): key for key in keys}

        db = router.db_for_read(self.cache_model_class)
        connection = connections[db]
        quote_name = connection.ops.quote_name
        table = quote_name(self._table)

        with connection.cursor() as cursor:
            cursor.execute(
                'SELECT %s, %s, %s FROM %s WHERE %s IN (%s)' % (
                    quote_name('cache_key'),
                    quote_name('value'),
                    quote_name('expires'),
                    table,
                    quote_name('cache_key'),
                    ', '.join(['%s'] * len(key_map)),
                ),
                list(key_map),
            )
            rows = cursor.fetchall()

        result = {}
        expired_keys = []
        expression = models.Expression(output_field=models.DateTimeField())
        converters = (connection.ops.get_db_converters(expression) + expression.get_db_converters(connection))
        for key, value, expires in rows:
            for converter in converters:
                expires = converter(expires, expression, connection)
            if expires < timezone.now():
                expired_keys.append(key)
            else:
                value = connection.ops.process_clob(value)
                value = pickle.loads(base64.b64decode(value.encode()))
                result[key_map.get(key)] = value
        self._base_delete_many(expired_keys)
        return result

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_and_validate_key(key, version=version)
        self._base_set('set', key, value, timeout)

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_and_validate_key(key, version=version)
        return self._base_set('add', key, value, timeout)

    def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_and_validate_key(key, version=version)
        return self._base_set('touch', key, None, timeout)

    def _base_set(self, mode, key, value, timeout=DEFAULT_TIMEOUT):
        timeout = self.get_backend_timeout(timeout)
        db = router.db_for_write(self.cache_model_class)
        connection = connections[db]
        quote_name = connection.ops.quote_name
        table = quote_name(self._table)

        with connection.cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM %s" % table)
            num = cursor.fetchone()[0]
            now = timezone.now()
            now = now.replace(microsecond=0)
            if timeout is None:
                exp = datetime.max
            else:
                tz = timezone.utc if settings.USE_TZ else None
                exp = datetime.fromtimestamp(timeout, tz=tz)
            exp = exp.replace(microsecond=0)
            if num > self._max_entries:
                self._cull(db, cursor, now, num)
            pickled = pickle.dumps(value, self.pickle_protocol)
            # The DB column is expecting a string, so make sure the value is a
            # string, not bytes. Refs #19274.
            b64encoded = base64.b64encode(pickled).decode('latin1')
            try:
                # Note: typecasting for datetimes is needed by some 3rd party
                # database backends. All core backends work without typecasting,
                # so be careful about changes here - test suite will NOT pick
                # regressions.
                with transaction.atomic(using=db):
                    cursor.execute(
                        'SELECT %s, %s FROM %s WHERE %s = %%s' % (
                            quote_name('cache_key'),
                            quote_name('expires'),
                            table,
                            quote_name('cache_key'),
                        ),
                        [key]
                    )
                    result = cursor.fetchone()

                    if result:
                        current_expires = result[1]
                        expression = models.Expression(output_field=models.DateTimeField())
                        for converter in (connection.ops.get_db_converters(expression) +
                                          expression.get_db_converters(connection)):
                            current_expires = converter(current_expires, expression, connection)

                    exp = connection.ops.adapt_datetimefield_value(exp)
                    if result and mode == 'touch':
                        cursor.execute(
                            'UPDATE %s SET %s = %%s WHERE %s = %%s' % (
                                table,
                                quote_name('expires'),
                                quote_name('cache_key')
                            ),
                            [exp, key]
                        )
                    elif result and (mode == 'set' or (mode == 'add' and current_expires < now)):
                        cursor.execute(
                            'UPDATE %s SET %s = %%s, %s = %%s WHERE %s = %%s' % (
                                table,
                                quote_name('value'),
                                quote_name('expires'),
                                quote_name('cache_key'),
                            ),
                            [b64encoded, exp, key]
                        )
                    elif mode != 'touch':
                        cursor.execute(
                            'INSERT INTO %s (%s, %s, %s) VALUES (%%s, %%s, %%s)' % (
                                table,
                                quote_name('cache_key'),
                                quote_name('value'),
                                quote_name('expires'),
                            ),
                            [key, b64encoded, exp]
                        )
                    else:
                        return False  # touch failed.
            except DatabaseError:
                # To be threadsafe, updates/inserts are allowed to fail silently
                return False
            else:
                return True

    def delete(self, key, version=None):
        key = self.make_and_validate_key(key, version=version)
        return self._base_delete_many([key])

    def delete_many(self, keys, version=None):
        keys = [self.make_and_validate_key(key, version=version) for key in keys]
        self._base_delete_many(keys)

    def _base_delete_many(self, keys):
        if not keys:
            return False

        db = router.db_for_write(self.cache_model_class)
        connection = connections[db]
        quote_name = connection.ops.quote_name
        table = quote_name(self._table)

        with connection.cursor() as cursor:
            cursor.execute(
                'DELETE FROM %s WHERE %s IN (%s)' % (
                    table,
                    quote_name('cache_key'),
                    ', '.join(['%s'] * len(keys)),
                ),
                keys,
            )
        return bool(cursor.rowcount)

    def has_key(self, key, version=None):
        key = self.make_and_validate_key(key, version=version)

        db = router.db_for_read(self.cache_model_class)
        connection = connections[db]
        quote_name = connection.ops.quote_name

        now = timezone.now().replace(microsecond=0, tzinfo=None)

        with connection.cursor() as cursor:
            cursor.execute(
                'SELECT %s FROM %s WHERE %s = %%s and expires > %%s' % (
                    quote_name('cache_key'),
                    quote_name(self._table),
                    quote_name('cache_key'),
                ),
                [key, connection.ops.adapt_datetimefield_value(now)]
            )
            return cursor.fetchone() is not None

    def _cull(self, db, cursor, now, num):
        if self._cull_frequency == 0:
            self.clear()
        else:
            connection = connections[db]
            table = connection.ops.quote_name(self._table)
            cursor.execute("DELETE FROM %s WHERE expires < %%s" % table,
                           [connection.ops.adapt_datetimefield_value(now)])
            deleted_count = cursor.rowcount
            remaining_num = num - deleted_count
            if remaining_num > self._max_entries:
                cull_num = remaining_num // self._cull_frequency
                cursor.execute(
                    connection.ops.cache_key_culling_sql() % table,
                    [cull_num])
                last_cache_key = cursor.fetchone()
                if last_cache_key:
                    cursor.execute(
                        'DELETE FROM %s WHERE cache_key < %%s' % table,
                        [last_cache_key[0]],
                    )

    def clear(self):
        db = router.db_for_write(self.cache_model_class)
        connection = connections[db]
        table = connection.ops.quote_name(self._table)
        with connection.cursor() as cursor:
            cursor.execute('DELETE FROM %s' % table)
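The backend assumes its table already exists; with Django's shipped tooling it is created by the createcachetable management command once an alias points at DatabaseCache. A sketch (alias and table name are illustrative):

    # settings.py
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
            'LOCATION': 'my_cache_table',  # stored as BaseDatabaseCache._table
        },
    }

    # shell
    python manage.py createcachetable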
venv/Lib/site-packages/django/core/cache/backends/dummy.py (vendored, new file, 34 lines)
@@ -0,0 +1,34 @@
"Dummy cache backend"

from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache


class DummyCache(BaseCache):
    def __init__(self, host, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        self.make_and_validate_key(key, version=version)
        return True

    def get(self, key, default=None, version=None):
        self.make_and_validate_key(key, version=version)
        return default

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        self.make_and_validate_key(key, version=version)

    def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
        self.make_and_validate_key(key, version=version)
        return False

    def delete(self, key, version=None):
        self.make_and_validate_key(key, version=version)
        return False

    def has_key(self, key, version=None):
        self.make_and_validate_key(key, version=version)
        return False

    def clear(self):
        pass
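Every method still validates the key but stores nothing, so DummyCache behaves like a real backend that never caches -- useful in development where you want cache-using code to run without persistence. Illustrative configuration:

    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        },
    }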
venv/Lib/site-packages/django/core/cache/backends/filebased.py (vendored, new file, 163 lines)
@@ -0,0 +1,163 @@
"File-based cache backend"
import glob
import hashlib
import os
import pickle
import random
import tempfile
import time
import zlib

from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.core.files import locks
from django.core.files.move import file_move_safe


class FileBasedCache(BaseCache):
    cache_suffix = '.djcache'
    pickle_protocol = pickle.HIGHEST_PROTOCOL

    def __init__(self, dir, params):
        super().__init__(params)
        self._dir = os.path.abspath(dir)
        self._createdir()

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        if self.has_key(key, version):
            return False
        self.set(key, value, timeout, version)
        return True

    def get(self, key, default=None, version=None):
        fname = self._key_to_file(key, version)
        try:
            with open(fname, 'rb') as f:
                if not self._is_expired(f):
                    return pickle.loads(zlib.decompress(f.read()))
        except FileNotFoundError:
            pass
        return default

    def _write_content(self, file, timeout, value):
        expiry = self.get_backend_timeout(timeout)
        file.write(pickle.dumps(expiry, self.pickle_protocol))
        file.write(zlib.compress(pickle.dumps(value, self.pickle_protocol)))

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        self._createdir()  # Cache dir can be deleted at any time.
        fname = self._key_to_file(key, version)
        self._cull()  # make some room if necessary
        fd, tmp_path = tempfile.mkstemp(dir=self._dir)
        renamed = False
        try:
            with open(fd, 'wb') as f:
                self._write_content(f, timeout, value)
            file_move_safe(tmp_path, fname, allow_overwrite=True)
            renamed = True
        finally:
            if not renamed:
                os.remove(tmp_path)

    def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
        try:
            with open(self._key_to_file(key, version), 'r+b') as f:
                try:
                    locks.lock(f, locks.LOCK_EX)
                    if self._is_expired(f):
                        return False
                    else:
                        previous_value = pickle.loads(zlib.decompress(f.read()))
                        f.seek(0)
                        self._write_content(f, timeout, previous_value)
                        return True
                finally:
                    locks.unlock(f)
        except FileNotFoundError:
            return False

    def delete(self, key, version=None):
        return self._delete(self._key_to_file(key, version))

    def _delete(self, fname):
        if not fname.startswith(self._dir) or not os.path.exists(fname):
            return False
        try:
            os.remove(fname)
        except FileNotFoundError:
            # The file may have been removed by another process.
            return False
        return True

    def has_key(self, key, version=None):
        fname = self._key_to_file(key, version)
        if os.path.exists(fname):
            with open(fname, 'rb') as f:
                return not self._is_expired(f)
        return False

    def _cull(self):
        """
        Remove random cache entries if max_entries is reached at a ratio
        of num_entries / cull_frequency. A value of 0 for CULL_FREQUENCY means
        that the entire cache will be purged.
        """
        filelist = self._list_cache_files()
        num_entries = len(filelist)
        if num_entries < self._max_entries:
            return  # return early if no culling is required
        if self._cull_frequency == 0:
            return self.clear()  # Clear the cache when CULL_FREQUENCY = 0
        # Delete a random selection of entries
        filelist = random.sample(filelist,
                                 int(num_entries / self._cull_frequency))
        for fname in filelist:
            self._delete(fname)

    def _createdir(self):
        # Set the umask because os.makedirs() doesn't apply the "mode" argument
        # to intermediate-level directories.
        old_umask = os.umask(0o077)
        try:
            os.makedirs(self._dir, 0o700, exist_ok=True)
        finally:
            os.umask(old_umask)

    def _key_to_file(self, key, version=None):
        """
        Convert a key into a cache file path. Basically this is the
        root cache path joined with the md5sum of the key and a suffix.
        """
        key = self.make_and_validate_key(key, version=version)
        return os.path.join(self._dir, ''.join(
            [hashlib.md5(key.encode()).hexdigest(), self.cache_suffix]))

    def clear(self):
        """
        Remove all the cache files.
        """
        for fname in self._list_cache_files():
            self._delete(fname)

    def _is_expired(self, f):
        """
        Take an open cache file `f` and delete it if it's expired.
        """
        try:
            exp = pickle.load(f)
        except EOFError:
            exp = 0  # An empty file is considered expired.
        if exp is not None and exp < time.time():
            f.close()  # On Windows a file has to be closed before deleting
            self._delete(f.name)
            return True
        return False

    def _list_cache_files(self):
        """
        Get a list of paths to all the cache files. These are all the files
        in the root cache dir that end on the cache_suffix.
        """
        return [
            os.path.join(self._dir, fname)
            for fname in glob.glob1(self._dir, '*%s' % self.cache_suffix)
        ]
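Each entry lives in its own file named by the md5 of the final key plus the '.djcache' suffix, with the pickled expiry written first and the zlib-compressed pickled value after it. An illustrative configuration (the directory is an assumption; any path writable by the server process works):

    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
            'LOCATION': '/var/tmp/django_cache',
        },
    }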
venv/Lib/site-packages/django/core/cache/backends/locmem.py (vendored, new file, 117 lines)
@@ -0,0 +1,117 @@
"Thread-safe in-memory cache backend."
import pickle
import time
from collections import OrderedDict
from threading import Lock

from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache

# Global in-memory store of cache data. Keyed by name, to provide
# multiple named local memory caches.
_caches = {}
_expire_info = {}
_locks = {}


class LocMemCache(BaseCache):
    pickle_protocol = pickle.HIGHEST_PROTOCOL

    def __init__(self, name, params):
        super().__init__(params)
        self._cache = _caches.setdefault(name, OrderedDict())
        self._expire_info = _expire_info.setdefault(name, {})
        self._lock = _locks.setdefault(name, Lock())

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_and_validate_key(key, version=version)
        pickled = pickle.dumps(value, self.pickle_protocol)
        with self._lock:
            if self._has_expired(key):
                self._set(key, pickled, timeout)
                return True
            return False

    def get(self, key, default=None, version=None):
        key = self.make_and_validate_key(key, version=version)
        with self._lock:
            if self._has_expired(key):
                self._delete(key)
                return default
            pickled = self._cache[key]
            self._cache.move_to_end(key, last=False)
        return pickle.loads(pickled)

    def _set(self, key, value, timeout=DEFAULT_TIMEOUT):
        if len(self._cache) >= self._max_entries:
            self._cull()
        self._cache[key] = value
        self._cache.move_to_end(key, last=False)
        self._expire_info[key] = self.get_backend_timeout(timeout)

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_and_validate_key(key, version=version)
        pickled = pickle.dumps(value, self.pickle_protocol)
        with self._lock:
            self._set(key, pickled, timeout)

    def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_and_validate_key(key, version=version)
        with self._lock:
            if self._has_expired(key):
                return False
            self._expire_info[key] = self.get_backend_timeout(timeout)
            return True

    def incr(self, key, delta=1, version=None):
        key = self.make_and_validate_key(key, version=version)
        with self._lock:
            if self._has_expired(key):
                self._delete(key)
                raise ValueError("Key '%s' not found" % key)
            pickled = self._cache[key]
            value = pickle.loads(pickled)
            new_value = value + delta
            pickled = pickle.dumps(new_value, self.pickle_protocol)
            self._cache[key] = pickled
            self._cache.move_to_end(key, last=False)
        return new_value

    def has_key(self, key, version=None):
        key = self.make_and_validate_key(key, version=version)
        with self._lock:
            if self._has_expired(key):
                self._delete(key)
                return False
            return True

    def _has_expired(self, key):
        exp = self._expire_info.get(key, -1)
        return exp is not None and exp <= time.time()

    def _cull(self):
        if self._cull_frequency == 0:
            self._cache.clear()
            self._expire_info.clear()
        else:
            count = len(self._cache) // self._cull_frequency
            for i in range(count):
                key, _ = self._cache.popitem()
                del self._expire_info[key]

    def _delete(self, key):
        try:
            del self._cache[key]
            del self._expire_info[key]
        except KeyError:
            return False
        return True

    def delete(self, key, version=None):
        key = self.make_and_validate_key(key, version=version)
        with self._lock:
            return self._delete(key)

    def clear(self):
        with self._lock:
            self._cache.clear()
            self._expire_info.clear()
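Stores are module-level and keyed by LOCATION, so two aliases with different LOCATION strings get independent per-process caches, and MAX_ENTRIES / CULL_FREQUENCY flow in through OPTIONS via BaseCache.__init__(). Illustrative configuration:

    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'per-process-cache',  # names the module-level store
            'OPTIONS': {'MAX_ENTRIES': 1000, 'CULL_FREQUENCY': 4},
        },
    }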
venv/Lib/site-packages/django/core/cache/backends/memcached.py (vendored, new file, 212 lines)
@@ -0,0 +1,212 @@
"Memcached cache backend"

import pickle
import re
import time
import warnings

from django.core.cache.backends.base import (
    DEFAULT_TIMEOUT, BaseCache, InvalidCacheKey, memcache_key_warnings,
)
from django.utils.deprecation import RemovedInDjango41Warning
from django.utils.functional import cached_property


class BaseMemcachedCache(BaseCache):
    def __init__(self, server, params, library, value_not_found_exception):
        super().__init__(params)
        if isinstance(server, str):
            self._servers = re.split('[;,]', server)
        else:
            self._servers = server

        # Exception type raised by the underlying client library for a
        # nonexistent key.
        self.LibraryValueNotFoundException = value_not_found_exception

        self._lib = library
        self._class = library.Client
        self._options = params.get('OPTIONS') or {}

    @property
    def client_servers(self):
        return self._servers

    @cached_property
    def _cache(self):
        """
        Implement transparent thread-safe access to a memcached client.
        """
        return self._class(self.client_servers, **self._options)

    def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
        """
        Memcached deals with long (> 30 days) timeouts in a special
        way. Call this function to obtain a safe value for your timeout.
        """
        if timeout == DEFAULT_TIMEOUT:
            timeout = self.default_timeout

        if timeout is None:
            # Using 0 in memcache sets a non-expiring timeout.
            return 0
        elif int(timeout) == 0:
            # Other cache backends treat 0 as set-and-expire. To achieve this
            # in memcache backends, a negative timeout must be passed.
            timeout = -1

        if timeout > 2592000:  # 60*60*24*30, 30 days
            # See https://github.com/memcached/memcached/wiki/Programming#expiration
            # "Expiration times can be set from 0, meaning "never expire", to
            # 30 days. Any time higher than 30 days is interpreted as a Unix
            # timestamp date. If you want to expire an object on January 1st of
            # next year, this is how you do that."
            #
            # This means that we have to switch to absolute timestamps.
            timeout += int(time.time())
        return int(timeout)

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_and_validate_key(key, version=version)
        return self._cache.add(key, value, self.get_backend_timeout(timeout))

    def get(self, key, default=None, version=None):
        key = self.make_and_validate_key(key, version=version)
        return self._cache.get(key, default)

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_and_validate_key(key, version=version)
        if not self._cache.set(key, value, self.get_backend_timeout(timeout)):
            # make sure the key doesn't keep its old value in case of failure to set (memcached's 1MB limit)
            self._cache.delete(key)

    def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_and_validate_key(key, version=version)
        return bool(self._cache.touch(key, self.get_backend_timeout(timeout)))

    def delete(self, key, version=None):
        key = self.make_and_validate_key(key, version=version)
        return bool(self._cache.delete(key))

    def get_many(self, keys, version=None):
        key_map = {self.make_and_validate_key(key, version=version): key for key in keys}
        ret = self._cache.get_multi(key_map.keys())
        return {key_map[k]: v for k, v in ret.items()}

    def close(self, **kwargs):
        # Many clients don't clean up connections properly.
        self._cache.disconnect_all()

    def incr(self, key, delta=1, version=None):
        key = self.make_and_validate_key(key, version=version)
        try:
            # Memcached doesn't support negative delta.
            if delta < 0:
                val = self._cache.decr(key, -delta)
            else:
                val = self._cache.incr(key, delta)
        # Normalize an exception raised by the underlying client library to
        # ValueError in the event of a nonexistent key when calling
        # incr()/decr().
        except self.LibraryValueNotFoundException:
            val = None
        if val is None:
            raise ValueError("Key '%s' not found" % key)
        return val

    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        safe_data = {}
        original_keys = {}
        for key, value in data.items():
            safe_key = self.make_and_validate_key(key, version=version)
            safe_data[safe_key] = value
            original_keys[safe_key] = key
        failed_keys = self._cache.set_multi(safe_data, self.get_backend_timeout(timeout))
        return [original_keys[k] for k in failed_keys]

    def delete_many(self, keys, version=None):
        keys = [self.make_and_validate_key(key, version=version) for key in keys]
        self._cache.delete_multi(keys)

    def clear(self):
        self._cache.flush_all()

    def validate_key(self, key):
        for warning in memcache_key_warnings(key):
            raise InvalidCacheKey(warning)


class MemcachedCache(BaseMemcachedCache):
    "An implementation of a cache binding using python-memcached"

    # python-memcached doesn't support default values in get().
    # https://github.com/linsomniac/python-memcached/issues/159
    _missing_key = None

    def __init__(self, server, params):
        warnings.warn(
            'MemcachedCache is deprecated in favor of PyMemcacheCache and '
            'PyLibMCCache.',
            RemovedInDjango41Warning, stacklevel=2,
        )
        # python-memcached ≥ 1.45 returns None for a nonexistent key in
        # incr/decr(), python-memcached < 1.45 raises ValueError.
        import memcache
        super().__init__(server, params, library=memcache, value_not_found_exception=ValueError)
        self._options = {'pickleProtocol': pickle.HIGHEST_PROTOCOL, **self._options}

    def get(self, key, default=None, version=None):
        key = self.make_and_validate_key(key, version=version)
        val = self._cache.get(key)
        # python-memcached doesn't support default values in get().
        # https://github.com/linsomniac/python-memcached/issues/159
        # Remove this method if that issue is fixed.
        if val is None:
            return default
        return val

    def delete(self, key, version=None):
        # python-memcached's delete() returns True when key doesn't exist.
        # https://github.com/linsomniac/python-memcached/issues/170
        # Call _deletetouch() without the NOT_FOUND in expected results.
        key = self.make_and_validate_key(key, version=version)
        return bool(self._cache._deletetouch([b'DELETED'], 'delete', key))


class PyLibMCCache(BaseMemcachedCache):
    "An implementation of a cache binding using pylibmc"
    def __init__(self, server, params):
        import pylibmc
        super().__init__(server, params, library=pylibmc, value_not_found_exception=pylibmc.NotFound)

    @property
    def client_servers(self):
        output = []
        for server in self._servers:
            output.append(server[5:] if server.startswith('unix:') else server)
        return output

    def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_and_validate_key(key, version=version)
        if timeout == 0:
            return self._cache.delete(key)
        return self._cache.touch(key, self.get_backend_timeout(timeout))

    def close(self, **kwargs):
        # libmemcached manages its own connections. Don't call disconnect_all()
        # as it resets the failover state and creates unnecessary reconnects.
        pass


class PyMemcacheCache(BaseMemcachedCache):
    """An implementation of a cache binding using pymemcache."""
    def __init__(self, server, params):
        import pymemcache.serde
        super().__init__(server, params, library=pymemcache, value_not_found_exception=KeyError)
        self._class = self._lib.HashClient
        self._options = {
            'allow_unicode_keys': True,
            'default_noreply': False,
            'serde': pymemcache.serde.pickle_serde,
            **self._options,
        }
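All three bindings accept LOCATION either as a list of servers or as a single string, which BaseMemcachedCache.__init__() splits on ';' or ','. An illustrative configuration for the non-deprecated pymemcache binding (addresses are assumptions):

    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.memcached.PyMemcacheCache',
            'LOCATION': '127.0.0.1:11211,127.0.0.2:11211',
        },
    }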
venv/Lib/site-packages/django/core/cache/backends/redis.py (vendored, new file, 224 lines)
@@ -0,0 +1,224 @@
"""Redis cache backend."""

import random
import re

from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.core.serializers.base import PickleSerializer
from django.utils.functional import cached_property
from django.utils.module_loading import import_string


class RedisSerializer(PickleSerializer):
    def dumps(self, obj):
        if isinstance(obj, int):
            return obj
        return super().dumps(obj)

    def loads(self, data):
        try:
            return int(data)
        except ValueError:
            return super().loads(data)


class RedisCacheClient:
    def __init__(
        self,
        servers,
        serializer=None,
        db=None,
        pool_class=None,
        parser_class=None,
    ):
        import redis

        self._lib = redis
        self._servers = servers
        self._pools = {}

        self._client = self._lib.Redis

        if isinstance(pool_class, str):
            pool_class = import_string(pool_class)
        self._pool_class = pool_class or self._lib.ConnectionPool

        if isinstance(serializer, str):
            serializer = import_string(serializer)
        if callable(serializer):
            serializer = serializer()
        self._serializer = serializer or RedisSerializer()

        if isinstance(parser_class, str):
            parser_class = import_string(parser_class)
        parser_class = parser_class or self._lib.connection.DefaultParser

        self._pool_options = {'parser_class': parser_class, 'db': db}

    def _get_connection_pool_index(self, write):
        # Write to the first server. Read from other servers if there are more,
        # otherwise read from the first server.
        if write or len(self._servers) == 1:
            return 0
        return random.randint(1, len(self._servers) - 1)

    def _get_connection_pool(self, write):
        index = self._get_connection_pool_index(write)
        if index not in self._pools:
            self._pools[index] = self._pool_class.from_url(
                self._servers[index], **self._pool_options,
            )
        return self._pools[index]

    def get_client(self, key=None, *, write=False):
        # key is used so that the method signature remains the same and custom
        # cache client can be implemented which might require the key to select
        # the server, e.g. sharding.
        pool = self._get_connection_pool(write)
        return self._client(connection_pool=pool)

    def add(self, key, value, timeout):
        client = self.get_client(key, write=True)
        value = self._serializer.dumps(value)

        if timeout == 0:
            if ret := bool(client.set(key, value, nx=True)):
                client.delete(key)
            return ret
        else:
            return bool(client.set(key, value, ex=timeout, nx=True))

    def get(self, key, default):
        client = self.get_client(key)
        value = client.get(key)
        return default if value is None else self._serializer.loads(value)

    def set(self, key, value, timeout):
        client = self.get_client(key, write=True)
        value = self._serializer.dumps(value)
        if timeout == 0:
            client.delete(key)
        else:
            client.set(key, value, ex=timeout)

    def touch(self, key, timeout):
        client = self.get_client(key, write=True)
        if timeout is None:
            return bool(client.persist(key))
        else:
            return bool(client.expire(key, timeout))

    def delete(self, key):
        client = self.get_client(key, write=True)
        return bool(client.delete(key))

    def get_many(self, keys):
        client = self.get_client(None)
        ret = client.mget(keys)
        return {
            k: self._serializer.loads(v) for k, v in zip(keys, ret) if v is not None
        }

    def has_key(self, key):
        client = self.get_client(key)
        return bool(client.exists(key))

    def incr(self, key, delta):
        client = self.get_client(key)
        if not client.exists(key):
            raise ValueError("Key '%s' not found." % key)
        return client.incr(key, delta)

    def set_many(self, data, timeout):
        client = self.get_client(None, write=True)
        pipeline = client.pipeline()
        pipeline.mset({k: self._serializer.dumps(v) for k, v in data.items()})

        if timeout is not None:
            # Setting timeout for each key as redis does not support timeout
            # with mset().
            for key in data:
                pipeline.expire(key, timeout)
        pipeline.execute()

    def delete_many(self, keys):
        client = self.get_client(None, write=True)
        client.delete(*keys)

    def clear(self):
        client = self.get_client(None, write=True)
        return bool(client.flushdb())


class RedisCache(BaseCache):
    def __init__(self, server, params):
        super().__init__(params)
        if isinstance(server, str):
            self._servers = re.split('[;,]', server)
        else:
            self._servers = server

        self._class = RedisCacheClient
        self._options = params.get('OPTIONS', {})

    @cached_property
    def _cache(self):
        return self._class(self._servers, **self._options)

    def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
        if timeout == DEFAULT_TIMEOUT:
            timeout = self.default_timeout
        # The key will be made persistent if None used as a timeout.
        # Non-positive values will cause the key to be deleted.
        return None if timeout is None else max(0, int(timeout))

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_and_validate_key(key, version=version)
        return self._cache.add(key, value, self.get_backend_timeout(timeout))

    def get(self, key, default=None, version=None):
        key = self.make_and_validate_key(key, version=version)
        return self._cache.get(key, default)

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_and_validate_key(key, version=version)
        self._cache.set(key, value, self.get_backend_timeout(timeout))

    def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_and_validate_key(key, version=version)
        return self._cache.touch(key, self.get_backend_timeout(timeout))

    def delete(self, key, version=None):
        key = self.make_and_validate_key(key, version=version)
        return self._cache.delete(key)

    def get_many(self, keys, version=None):
        key_map = {self.make_and_validate_key(key, version=version): key for key in keys}
        ret = self._cache.get_many(key_map.keys())
        return {key_map[k]: v for k, v in ret.items()}

    def has_key(self, key, version=None):
        key = self.make_and_validate_key(key, version=version)
        return self._cache.has_key(key)

    def incr(self, key, delta=1, version=None):
        key = self.make_and_validate_key(key, version=version)
        return self._cache.incr(key, delta)

    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        safe_data = {}
        for key, value in data.items():
            key = self.make_and_validate_key(key, version=version)
            safe_data[key] = value
        self._cache.set_many(safe_data, self.get_backend_timeout(timeout))
        return []

    def delete_many(self, keys, version=None):
        safe_keys = []
        for key in keys:
            key = self.make_and_validate_key(key, version=version)
            safe_keys.append(key)
        self._cache.delete_many(safe_keys)

    def clear(self):
        return self._cache.clear()
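Per _get_connection_pool_index() above, the first LOCATION entry receives all writes and any additional entries serve randomly chosen reads (e.g. replicas). An illustrative configuration (the URLs are assumptions):

    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.redis.RedisCache',
            'LOCATION': [
                'redis://127.0.0.1:6379',  # writes (and reads if it is the only server)
                'redis://127.0.0.1:6380',  # read-only traffic
            ],
        },
    }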
venv/Lib/site-packages/django/core/cache/utils.py (vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
import hashlib

TEMPLATE_FRAGMENT_KEY_TEMPLATE = 'template.cache.%s.%s'


def make_template_fragment_key(fragment_name, vary_on=None):
    hasher = hashlib.md5()
    if vary_on is not None:
        for arg in vary_on:
            hasher.update(str(arg).encode())
            hasher.update(b':')
    return TEMPLATE_FRAGMENT_KEY_TEMPLATE % (fragment_name, hasher.hexdigest())
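This produces the same key the {% cache %} template tag computes, so view code can invalidate a cached fragment. A sketch with an illustrative fragment name and vary-on value:

    from django.core.cache import cache
    from django.core.cache.utils import make_template_fragment_key

    # Matches {% cache 500 sidebar username %} rendered with username='alice'.
    key = make_template_fragment_key('sidebar', ['alice'])
    cache.delete(key)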