The following 26 code examples, extracted from open-source Python projects, illustrate how to use django.core.cache.cache.add().
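Before the examples, a quick summary of the method's contract: cache.add() stores a value only when the key is not already present, and returns True if the value was stored and False otherwise (unlike cache.set(), which overwrites unconditionally). A minimal sketch, with illustrative key names:

from django.core.cache import cache

# add() writes only if the key is absent and reports whether it wrote.
stored = cache.add('greeting', 'hello', timeout=30)  # True: key was absent
stored = cache.add('greeting', 'goodbye')            # False: key already set
assert cache.get('greeting') == 'hello'              # the first value survives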
def test_binary_string(self):
    # Binary strings should be cacheable
    cache = self.cache
    from zlib import compress, decompress
    value = 'value_to_be_compressed'
    compressed_value = compress(value.encode())

    # Test set
    cache.set('binary1', compressed_value)
    compressed_result = cache.get('binary1')
    self.assertEqual(compressed_value, compressed_result)
    self.assertEqual(value, decompress(compressed_result).decode())

    # Test add
    cache.add('binary1-add', compressed_value)
    compressed_result = cache.get('binary1-add')
    self.assertEqual(compressed_value, compressed_result)
    self.assertEqual(value, decompress(compressed_result).decode())

    # Test set_many
    cache.set_many({'binary1-set_many': compressed_value})
    compressed_result = cache.get('binary1-set_many')
    self.assertEqual(compressed_value, compressed_result)
    self.assertEqual(value, decompress(compressed_result).decode())
def test_forever_timeout(self):
    """
    Passing in None into timeout results in a value that is cached forever
    """
    cache = self.cache
    cache.set('key1', 'eggs', None)
    self.assertEqual(cache.get('key1'), 'eggs')

    cache.add('key2', 'ham', None)
    self.assertEqual(cache.get('key2'), 'ham')
    added = cache.add('key1', 'new eggs', None)
    self.assertIs(added, False)
    self.assertEqual(cache.get('key1'), 'eggs')

    cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
    self.assertEqual(cache.get('key3'), 'sausage')
    self.assertEqual(cache.get('key4'), 'lobster bisque')
def single_instance_task(timeout):
    def task_exc(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            lock_id = "celery-single-instance-" + func.__name__

            def acquire_lock():
                return cache.add(lock_id, "true", timeout)

            def release_lock():
                return cache.delete(lock_id)

            if acquire_lock():
                try:
                    func(*args, **kwargs)
                finally:
                    release_lock()
        return wrapper
    return task_exc
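This decorator is the classic cache.add() locking pattern: because add() is atomic on backends such as Memcached, at most one worker can create the lock key before it expires. A hypothetical usage, assuming a configured Celery app object and the decorator above (the task name and timeout are illustrative):

# Hypothetical usage; `app` is assumed to be a configured Celery instance.
@app.task
@single_instance_task(timeout=60 * 5)  # lock auto-expires after 5 minutes
def rebuild_search_index():
    ...  # at most one worker executes this body at any given time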
def get(self, request, **kwargs):
    api_kwargs = self.get_wp_api_kwargs(**kwargs)
    page = api_kwargs.get('page_number', 1)
    context = cache.get(
        "blog_category_context_" + kwargs.get('slug') +
        self.blog_language + '_page_' + str(page))
    context = self.get_context_data(**kwargs) if \
        context is None else context
    cache.add(
        "blog_category_context_" + kwargs.get('slug') +
        self.blog_language + '_page_' + str(page), context, cache_time)
    category_name = None
    if context['blogs']:
        for item in context['blogs'][0]['terms']['category']:
            if str(item['slug']) == kwargs.get('slug'):
                context['category'] = item
                category_name = item['name']
    context['category_name'] = category_name
    return render(request, self.template_name, context)
def get(self, request, **kwargs):
    api_kwargs = self.get_wp_api_kwargs(**kwargs)
    page = api_kwargs.get('page_number', 1)
    context = cache.get(
        "blog_tag_context_" + kwargs.get('slug') +
        self.blog_language + '_page_' + str(page))
    context = self.get_context_data(**kwargs) if \
        context is None else context
    cache.add(
        "blog_tag_context_" + kwargs.get('slug') +
        self.blog_language + '_page_' + str(page), context, cache_time)
    category_name = None
    if context['blogs']:
        for item in context['blogs'][0]['terms']['post_tag']:
            if str(item['slug']) == kwargs.get('slug'):
                context['tag'] = item
                category_name = item['name']
    context['tag_name'] = category_name
    return render(request, self.template_name, context)
def get(self, request, **kwargs):
    api_kwargs = self.get_wp_api_kwargs(**kwargs)
    page = api_kwargs.get('page_number', 1)
    context = cache.get(
        "blog_author_context_" + kwargs.get('slug') +
        self.blog_language + '_page_' + str(page))
    context = self.get_context_data(**kwargs) if \
        context is None else context
    cache.add(
        "blog_author_context_" + kwargs.get('slug') +
        self.blog_language + '_page_' + str(page), context, cache_time)
    author_name = None
    if context['blogs']:
        if str(context['blogs'][0]['author']['slug']) == kwargs.get('slug'):
            author_name = kwargs.get('slug')
    context['author_name'] = author_name
    return render(request, self.template_name, context)
def get_lock(key, prefix='task'):
    """Get a lock for a specific key (usually email address)

    Needs to be done with a timeout because SFDC needs some time to populate
    its indexes before the duplicate protection works and queries will return
    results. Releasing the lock right after the task was run still allowed
    dupes.

    Does nothing if you get the lock, and raises RetryTask if not.
    """
    if not settings.TASK_LOCKING_ENABLE:
        return

    lock_key = 'basket-{}-{}'.format(prefix, key)
    lock_key = sha256(lock_key).hexdigest()
    got_lock = cache.add(lock_key, True, settings.TASK_LOCK_TIMEOUT)
    if not got_lock:
        statsd.incr('news.tasks.get_lock.no_lock_retry')
        raise RetryTask('Could not acquire lock')
def sfdc_add_update(update_data, user_data=None):
    # for use with maintenance mode only
    # TODO remove after maintenance is over and queue is processed
    if user_data:
        sfdc.update(user_data, update_data)
    else:
        try:
            sfdc.add(update_data)
        except sfapi.SalesforceMalformedRequest as e:  # noqa
            # possibly a duplicate email. try the update below.
            user_data = get_user_data(email=update_data['email'],
                                      extra_fields=['id'])
            if user_data:
                # we have a user, delete generated token
                # and continue with an update
                update_data.pop('token', None)
                sfdc.update(user_data, update_data)
            else:
                # still no user, try the add one more time
                sfdc.add(update_data)
def update_custom_unsub(token, reason):
    """Record a user's custom unsubscribe reason."""
    get_lock(token)
    try:
        sfdc.update({'token': token}, {'reason': reason})
    except sfapi.SalesforceMalformedRequest:
        # likely the record can't be found. try the DoI DE.
        user = get_sfmc_doi_user(token)
        if user and user.get('email'):
            get_lock(user['email'])
            user['reason'] = reason
            try:
                sfdc.add(user)
            except sfapi.SalesforceMalformedRequest:
                # probably already know the email address
                sfdc.update({'email': user['email']}, user)
def __init__(self):
    """
    Middleware init is called once per server on startup - do the heavy
    lifting here.
    """
    # If disabled or not enabled raise MiddlewareNotUsed so django
    # processes next middleware.
    self.ENABLED = getattr(settings, 'BANS_ENABLED', False)
    self.DEBUG = getattr(settings, 'BANS_DEBUG', False)

    if not self.ENABLED:
        raise MiddlewareNotUsed("bans are not enabled via settings.py")

    if self.DEBUG:
        print("Bans status = enabled")

    # Populate various 'banish' buckets
    for ban in Ban.objects.all():
        if self.DEBUG:
            print(ban)
        cache.add('BAN:' + ban.address, '1', None)
def lock(self):
    if not self.scratch:
        cache.add(self.lock_id, 'manually locked', timeout=None)
def test_eviction(self):
    cache = self.cache
    for r in range(50):
        cache.set(r, r)
    # The ordering below is very strict.
    # set will evict
    cache.set('a', 'a')
    self.assertIn('a', cache)
    self.assertIn(49, cache)
    self.assertNotIn(0, cache)
    # In promotes
    self.assertIn(1, cache)
    cache.set('b', 'b')
    self.assertNotIn(2, cache)
    # Add will evict
    self.assertFalse(cache.add('a', 'a'))
    self.assertIn(1, cache)
    self.assertTrue(cache.add('c', 'c'))
    self.assertNotIn(3, cache)
    # Get does not evict
    self.assertEqual(cache.get('c'), 'c')
    self.assertIn(4, cache)
    self.assertIsNone(cache.get('d'))
    self.assertIn(5, cache)
    # Get promotes
    self.assertEqual(cache.get(6), 6)
    cache.set('d', 'd')
    self.assertIn(6, cache)
    self.assertNotIn(7, cache)
def test_add(self):
    # A key can be added to a cache
    cache = self.cache
    cache.add("addkey1", "value")
    result = cache.add("addkey1", "newvalue")
    self.assertFalse(result)
    self.assertEqual(cache.get("addkey1"), "value")
def test_expiration(self):
    # Cache values can be set to expire
    cache = self.cache
    cache.set('expire1', 'very quickly', 1)
    cache.set('expire2', 'very quickly', 1)
    cache.set('expire3', 'very quickly', 1)

    time.sleep(2)
    self.assertIsNone(cache.get("expire1"))

    cache.add("expire2", "newvalue")
    self.assertEqual(cache.get("expire2"), "newvalue")
    self.assertFalse(cache.has_key("expire3"))  # noqa: W601
def test_unicode(self):
    # Unicode values can be cached
    cache = self.cache
    stuff = {
        'ascii': 'ascii_value',
        'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
        'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
        'ascii2': {'x': 1}
    }
    # Test `set`
    for (key, value) in stuff.items():
        cache.set(key, value)
        self.assertEqual(cache.get(key), value)

    # Test `add`
    for (key, value) in stuff.items():
        cache.delete(key)
        cache.add(key, value)
        self.assertEqual(cache.get(key), value)

    # Test `set_many`
    for (key, value) in stuff.items():
        cache.delete(key)
    cache.set_many(stuff)
    for (key, value) in stuff.items():
        self.assertEqual(cache.get(key), value)
def test_zero_timeout(self):
    """
    Passing in zero into timeout results in a value that is not cached
    """
    cache = self.cache
    cache.set('key1', 'eggs', 0)
    self.assertIsNone(cache.get('key1'))

    cache.add('key2', 'ham', 0)
    self.assertIsNone(cache.get('key2'))

    cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
    self.assertIsNone(cache.get('key3'))
    self.assertIsNone(cache.get('key4'))
def get_context_data(self, **kwargs):
    api_kwargs = self.get_wp_api_kwargs(**kwargs)
    page = api_kwargs.get('page_number', 1)
    context = cache.get(
        "blog_list_cache" + self.blog_language + "_page_" + str(page))
    context = super(BlogListView, self).get_context_data(**kwargs) if \
        context is None else context
    cache.add(
        "blog_list_cache" + self.blog_language + "_page_" + str(page),
        context, cache_time)
    return context
def confirm_user(token):
    """
    Confirm any pending subscriptions for the user with this token.

    If any of the subscribed newsletters have welcome messages,
    send them.

    :param token: User's token
    :param user_data: Dictionary with user's data from Exact Target,
        as returned by get_user_data(), or None if that wasn't available
        when this was called.
    :raises: BasketError for fatal errors, NewsletterException for retryable
        errors.
    """
    get_lock(token)
    user_data = get_user_data(token=token)

    if user_data is None:
        user = get_sfmc_doi_user(token)
        if user and user.get('email'):
            get_lock(user['email'])
            user['optin'] = True
            try:
                sfdc.add(user)
            except sfapi.SalesforceMalformedRequest:
                # probably already know the email address
                sfdc.update({'email': user['email']}, user)
            statsd.incr('news.tasks.confirm_user.moved_from_sfmc')
        else:
            statsd.incr('news.tasks.confirm_user.confirm_user_not_found')
        return

    if user_data['optin']:
        # already confirmed
        return

    if not ('email' in user_data and user_data['email']):
        raise BasketError('token has no email in ET')

    sfdc.update(user_data, {'optin': True})
def save(self, *args, **kwargs):
    cache.add('BAN:' + self.address, '1', None)
    super(Ban, self).save(*args, **kwargs)
def acquire_lock(name, expires_in=None):
    """
    Acquire a database lock. Raises LockUnavailable when the lock is
    unavailable.
    """
    if cache.add(name, True, timeout=expires_in):
        return True
    raise exceptions.LockUnavailable(name)
def upload_video(public_video_id, file_object):
    """
    Store a video file for transcoding.

    Args:
        public_video_id (str)
        file_object (file)
    """
    # Make upload url unavailable immediately to avoid race conditions
    models.VideoUploadUrl.objects.filter(
        public_video_id=public_video_id).update(was_used=True)
    video_upload_url = models.VideoUploadUrl.objects.get(
        public_video_id=public_video_id)

    # Upload video
    backend.get().upload_video(public_video_id, file_object)

    # Create video object
    video = models.Video.objects.create(
        public_id=video_upload_url.public_video_id,
        owner=video_upload_url.owner,
        title=file_object.name,
    )
    if video_upload_url.playlist:
        video.playlists.add(video_upload_url.playlist)

    # Start transcoding
    send_task('transcode_video', args=(public_video_id,))
def cache_meeting_page(timeout=60 * 10, render_timeout=15):
    def _decorator(fn):
        @wraps(fn)
        def _inner(*args, **kwargs):
            meeting_pk = kwargs.pop('meeting_pk')
            # getting the meeting from the database is an implicit
            # permission check
            meeting = get_object_or_404(Meeting, pk=meeting_pk)
            kwargs['meeting'] = meeting
            cache_key = cache_key_for_meeting_page(meeting, fn)
            lock_key = cache_key + ':render-lock'
            while True:
                html = cache.get(cache_key)
                if html is None:
                    if cache.add(lock_key, 'in-progress', render_timeout):
                        break
                    else:
                        time.sleep(1)
                else:
                    return HttpResponse(html)
            try:
                html = fn(*args, **kwargs)
                cache.set(cache_key, html, timeout)
                return HttpResponse(html)
            finally:
                cache.delete(lock_key)
        return _inner
    return _decorator
def cache_incr(self, key):
    # memcache is only backend that can increment atomically
    try:
        # add first, to ensure the key exists
        cache.add(key, 0, timeout=self.expire_after())
        cache.incr(key)
    except AttributeError:
        cache.set(key, cache.get(key, 0) + 1, self.expire_after())
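The add-then-incr idiom above works because add() seeds the counter only when the key is absent, so the following incr() always finds a key to operate on (Django's incr() raises ValueError for a missing key). A minimal sketch, with an illustrative key name and TTL:

from django.core.cache import cache

def count_hit(key='hits:homepage', ttl=60):
    # Seed the counter if absent; a no-op when the key already exists.
    cache.add(key, 0, timeout=ttl)
    # incr() is atomic on backends such as Memcached and would raise
    # ValueError if the key were missing.
    return cache.incr(key)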
def should_be_throttled(self, identifier, **kwargs):
    """
    Returns whether or not the user has exceeded their throttle limit.

    Maintains a list of timestamps when the user accessed the api within
    the cache.

    Returns ``False`` if the user should NOT be throttled or ``True`` if
    the user should be throttled.
    """
    key = self.convert_identifier_to_key(identifier)

    # Make sure something is there.
    cache.add(key, [])

    # Weed out anything older than the timeframe.
    minimum_time = int(time.time()) - int(self.timeframe)
    times_accessed = [
        access for access in cache.get(key) if access >= minimum_time
    ]
    cache.set(key, times_accessed, self.expiration)

    if len(times_accessed) >= int(self.throttle_at):
        # Throttle them.
        return True

    # Let them through.
    return False
def check_queued_build(build_id):
    reset_database_connection()

    from metaci.build.models import Build
    try:
        build = Build.objects.get(id=build_id)
    except Build.DoesNotExist:
        time.sleep(1)
        build = Build.objects.get(id=build_id)

    # Check for concurrency blocking
    try:
        org = Org.objects.get(name=build.plan.org, repo=build.repo)
    except Org.DoesNotExist:
        message = 'Could not find org configuration for org {}'.format(
            build.plan.org)
        build.log = message
        build.set_status('error')
        build.save()
        return message

    if org.scratch:
        # For scratch orgs, we don't need concurrency blocking logic,
        # just run the build
        res_run = run_build.delay(build.id)
        build.task_id_check = None
        build.task_id_run = res_run.id
        build.save()
        return ('Org is a scratch org, running build concurrently '
                'as task {}'.format(res_run.id))
    else:
        # For persistent orgs, use the cache to lock the org
        status = cache.add(
            org.lock_id,
            'build-{}'.format(build_id),
            timeout=BUILD_TIMEOUT,
        )
        if status is True:
            # Lock successful, run the build
            res_run = run_build.delay(build.id, org.lock_id)
            build.task_id_run = res_run.id
            build.save()
            return 'Got a lock on the org, running as task {}'.format(
                res_run.id)
        else:
            # Failed to get lock, queue next check
            build.task_id_check = None
            build.set_status('waiting')
            build.log = 'Waiting on build #{} to complete'.format(
                cache.get(org.lock_id))
            build.save()
            return ('Failed to get lock on org. '
                    '{} has the org locked. Queueing next check.'.format(
                        cache.get(org.lock_id)))
def test_cache_versioning_add(self):
    # add, default version = 1, but manually override version = 2
    cache = self.cache
    cache2 = LRUObjectCache('lru2', dict(VERSION=2))
    cache2._cache = cache._cache

    cache.add('answer1', 42, version=2)
    self.assertIsNone(cache.get('answer1', version=1))
    self.assertEqual(cache.get('answer1', version=2), 42)

    cache.add('answer1', 37, version=2)
    self.assertIsNone(cache.get('answer1', version=1))
    self.assertEqual(cache.get('answer1', version=2), 42)

    cache.add('answer1', 37, version=1)
    self.assertEqual(cache.get('answer1', version=1), 37)
    self.assertEqual(cache.get('answer1', version=2), 42)

    # v2 add, using default version = 2
    cache2.add('answer2', 42)
    self.assertIsNone(cache.get('answer2', version=1))
    self.assertEqual(cache.get('answer2', version=2), 42)

    cache2.add('answer2', 37)
    self.assertIsNone(cache.get('answer2', version=1))
    self.assertEqual(cache.get('answer2', version=2), 42)

    cache2.add('answer2', 37, version=1)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertEqual(cache.get('answer2', version=2), 42)

    # v2 add, default version = 2, but manually override version = 1
    cache2.add('answer3', 42, version=1)
    self.assertEqual(cache.get('answer3', version=1), 42)
    self.assertIsNone(cache.get('answer3', version=2))

    cache2.add('answer3', 37, version=1)
    self.assertEqual(cache.get('answer3', version=1), 42)
    self.assertIsNone(cache.get('answer3', version=2))

    cache2.add('answer3', 37)
    self.assertEqual(cache.get('answer3', version=1), 42)
    self.assertEqual(cache.get('answer3', version=2), 37)