The following 50 code examples, extracted from open-source Python projects, illustrate how to use transaction.get().
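transaction.get() returns the current transaction of the thread-local transaction manager, beginning a new one if none is active. The returned Transaction object can be annotated with note(), carry metadata via setExtendedInfo(), have resources joined with join(), and register addAfterCommitHook() callbacks, all of which appear in the examples below. As a minimal sketch of the common pattern (the note text and extended-info key are illustrative placeholders, not from any of the projects below):

import transaction

txn = transaction.get()  # current transaction; a new one is begun if none is active
txn.note('describe what this change does')  # appended to the transaction description
txn.setExtendedInfo('example_key', True)  # arbitrary metadata; 'example_key' is illustrative
try:
    # ... modify transaction-managed resources (ZODB objects, joined sessions, ...) ...
    transaction.commit()  # commit the current transaction
except Exception:
    transaction.abort()   # roll everything back on failure
    raise
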
def get_hook():
    request = get_current_request()
    try:
        site = request.site
        search = queryUtility(ICatalogUtility)
    except (AttributeError, KeyError):
        return

    if not search:
        return  # no search configured

    try:
        trns = tm(request).get()
    except RequestNotFound:
        trns = transaction.get()

    hook = None
    for _hook in trns._after_commit:
        if isinstance(_hook[0], CommitHook):
            hook = _hook[0]
            break
    if hook is None:
        hook = CommitHook(site, request)
        trns.addAfterCommitHook(hook)
    return hook

def client_cleanup(self):
    self.job.update_state(BackupJob.State.client_done, BackupJob.State.client_cleanup)
    del self.job.client_manifest_data
    del self.job.client_manifest_id_str
    try:
        del self.job.client_key_data
        del self.job.client_key_type
    except AttributeError:
        pass
    transaction.get().note('Deleted client keys of job %s' % self.job.id)
    transaction.commit()
    # TODO delete checkpoints
    # TODO do we actually want this? if we leave the cache, the next job has a good chance of rsyncing just a delta
    # TODO perhaps a per-client setting, to limit space usage on the client with multiple repositories.

def _add_completed_archive(self):
    log.debug('Saving archive metadata to database')
    archive = BorgArchive(self.repository, self._repository_key, self._manifest,
                          self.job.archive_name, cache=self._cache)
    stats = archive.calc_stats(self._cache)
    duration = archive.ts_end - archive.ts
    ao = Archive(
        id=archive.fpr,
        repository=self.job.repository,
        name=archive.name,
        client=self.job.client,
        job=self.job,
        nfiles=stats.nfiles,
        original_size=stats.osize,
        compressed_size=stats.csize,
        deduplicated_size=stats.usize,
        duration=duration,
        timestamp=archive.ts,
        timestamp_end=archive.ts_end,
    )
    self.job.archive = ao
    transaction.get().note('Added completed archive %s for job %s' % (ao.id, self.job.id))
    transaction.commit()
    log.debug('Saved archive metadata')

def test_celery__TransactionAwareTask__delay__5(celery_session_worker, zcml):
    """It allows running two tasks in a single session."""
    auth = zope.component.getUtility(
        zope.authentication.interfaces.IAuthentication)
    principal = auth.getPrincipal('example.user')
    z3c.celery.celery.login_principal(principal)
    result1 = get_principal_title_task.delay()

    zope.security.management.endInteraction()
    principal = auth.getPrincipal('zope.user')
    z3c.celery.celery.login_principal(principal)
    result2 = get_principal_title_task.delay()

    transaction.commit()
    assert 'Ben Utzer' == result1.get()
    assert 'User' == result2.get()

def invalidate_new_back_revs(event):
    ''' Invalidate objects that rev_link to us

    Catch those objects which newly rev_link us
    '''
    context = event.object
    updated = event.request._updated_uuid_paths
    initial = event.request._initial_back_rev_links.get(context.uuid, {})
    properties = context.upgrade_properties()
    current = {
        path: set(simple_path_ids(properties, path))
        for path in context.type_info.merged_back_rev
    }
    for rel, uuids in current.items():
        for uuid in uuids.difference(initial.get(rel, ())):
            # bare lookup registers uuid as updated (``updated`` is a defaultdict)
            updated[uuid]

def _update_keys(self, model, unique_keys):
    keys_set = {(k, v) for k, values in unique_keys.items() for v in values}
    existing = {
        (key.name, key.value)
        for key in model.unique_keys
    }
    to_remove = existing - keys_set
    to_add = keys_set - existing

    session = self.DBSession()
    for pk in to_remove:
        key = session.query(Key).get(pk)
        session.delete(key)
    for name, value in to_add:
        key = Key(rid=model.rid, name=name, value=value)
        session.add(key)
    return to_add, to_remove

def _update_rels(self, model, links):
    session = self.DBSession()
    source = model.rid
    rels = {(k, uuid.UUID(target)) for k, targets in links.items() for target in targets}
    existing = {
        (link.rel, link.target_rid)
        for link in model.rels
    }
    to_remove = existing - rels
    to_add = rels - existing

    for rel, target in to_remove:
        link = session.query(Link).get((source, rel, target))
        session.delete(link)
    for rel, target in to_add:
        link = Link(source_rid=source, rel=rel, target_rid=target)
        session.add(link)
    return to_add, to_remove

def add_transaction_record(session, flush_context, instances):
    txn = transaction.get()
    # Set data with txn.setExtendedInfo(name, value)
    data = txn._extension
    record = data.get('_snovault_transaction_record')
    if record is not None:
        if orm.object_session(record) is None:
            # Savepoint rolled back
            session.add(record)
        # Transaction has already been recorded
        return
    tid = data['tid'] = uuid.uuid4()
    record = TransactionRecord(tid=tid)
    data['_snovault_transaction_record'] = record
    session.add(record)

def record_transaction_data(session):
    txn = transaction.get()
    data = txn._extension
    if '_snovault_transaction_record' not in data:
        return
    record = data['_snovault_transaction_record']
    # txn.note(text)
    if txn.description:
        data['description'] = txn.description
    # txn.setUser(user_name, path='/') -> '/ user_name'
    # Set by pyramid_tm as (userid, '')
    if txn.user:
        user_path, userid = txn.user.split(' ', 1)
        data['userid'] = userid
    record.data = {k: v for k, v in data.items() if not k.startswith('_')}
    session.add(record)

def set_transaction_isolation_level(session, sqla_txn, connection):
    ''' Set appropriate transaction isolation level.

    Doomed transactions can be read-only.
    ``transaction.doom()`` must be called before the connection is used.
    Otherwise assume it is a write which must be REPEATABLE READ.
    '''
    if connection.engine.url.drivername != 'postgresql':
        return

    txn = transaction.get()
    if not txn.isDoomed():
        # connection.execute("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;")
        return

    data = txn._extension
    if 'snapshot_id' in data:
        connection.execute(
            _set_transaction_snapshot,
            snapshot_id=data['snapshot_id'])
    else:
        connection.execute("SET TRANSACTION READ ONLY;")

def mailer_send(subject="!",
                sender=None,
                recipients=[],
                body=None,
                html=None,
                attachments=[]):
    try:
        request = get_current_request()
        if sender is None:
            sender = request.registry.settings['lac.admin_email']
        mailer = get_mailer(request)
        message = Message(subject=subject,
                          sender=sender,
                          recipients=recipients,
                          body=body,
                          html=html)
        for attachment in attachments:
            attachment = Attachment(attachment.title,
                                    attachment.mimetype,
                                    attachment)
            message.attach(attachment)
        if transaction.get().status == Status.COMMITTED:
            mailer.send_immediately(message)
        else:
            mailer.send(message)
    except Exception:
        pass

def set_failure_cause(self, kind, **kwargs):
    borgcube.utils.hook.borgcube_job_failure_cause(job=self, kind=kind, kwargs=kwargs)
    self.force_state(self.State.failed)
    self.failure_cause = {
        'kind': kind,
    }
    self.failure_cause.update(kwargs)
    transaction.get().note('Set failure cause of job %s to %s' % (self.id, kind))
    transaction.commit()

def action_form_view(self, request):
    dotted_path = request.GET.get('class')
    cls = ScheduledAction.get_class(dotted_path)
    if not cls:
        log.error('scheduled_action_form request for %r which is not a schedulable action', dotted_path)
        return HttpResponseBadRequest()
    return HttpResponse(cls.Form().as_table())

def delete_view(self, request):
    if request.method == 'POST':
        data_root().schedules.remove(self.schedule)
        transaction.get().note('Deleted schedule %s' % self.schedule.oid)
        transaction.commit()
    return self.parent.redirect_to()

def add_view(self, request):
    data = request.POST or None
    client_form = Client.Form(data)
    connection_form = RshClientConnection.Form(data)
    if data and client_form.is_valid() and connection_form.is_valid():
        connection = RshClientConnection(**connection_form.cleaned_data)
        client = Client(connection=connection, **client_form.cleaned_data)
        transaction.get().note('Added client %s' % client.hostname)
        transaction.commit()
        return self.redirect_to()
    return self.render(request, 'core/client/add.html', {
        'client_form': client_form,
        'connection_form': connection_form,
    })

def add_view(self, request):
    client = self.parent.client
    data = request.POST or None
    form = JobConfigForm(data=data)
    advanced_form = JobConfigForm.AdvancedForm(data=data)
    if data and form.is_valid() and advanced_form.is_valid():
        config = form.cleaned_data
        config.update(advanced_form.cleaned_data)
        config['paths'] = config.get('paths', '').split('\n')
        config['excludes'] = [s for s in config.get('excludes', '').split('\n') if s]
        repository = config.pop('repository')
        job_config = BackupConfig(client=client, repository=repository, label=config['label'])
        job_config._update(config)
        client.job_configs.append(job_config)
        transaction.get().note('Added job config to client %s' % client.hostname)
        transaction.commit()
        # TODO StringListValidator
        # TODO Pattern validation
        # TODO fancy pattern editor with test area
        return self[job_config.oid].redirect_to()
    return self.render(request, 'core/client/config_add.html', {
        'form': form,
        'advanced_form': advanced_form,
    })

def edit_view(self, request):
    client = self.config.client
    job_config = self.config
    data = request.POST or None
    job_config._p_activate()
    initial_data = dict(job_config.__dict__)
    initial_data['paths'] = '\n'.join(initial_data['paths'])
    initial_data['excludes'] = '\n'.join(initial_data['excludes'])
    form = JobConfigForm(data=data, initial=initial_data)
    advanced_form = JobConfigForm.AdvancedForm(data=data, initial=initial_data)
    if data and form.is_valid() and advanced_form.is_valid():
        config = form.cleaned_data
        config.update(advanced_form.cleaned_data)
        config['paths'] = config.get('paths', '').split('\n')
        config['excludes'] = [s for s in config.get('excludes', '').split('\n') if s]
        job_config._update(config)
        # TODO StringListValidator
        # TODO Pattern validation
        # TODO fancy pattern editor with test area
        transaction.get().note('Edited job config %s of client %s' % (job_config.oid, client.hostname))
        transaction.commit()
        return self.redirect_to()
    return self.render(request, 'core/client/config_edit.html', {
        'client': client,
        'form': form,
        'advanced_form': advanced_form,
        'job_config': job_config,
    })

def delete_view(self, request):
    client = self.parent.parent.client
    if request.method == 'POST':
        client.job_configs.remove(self.config)
        # Could just leave it there, but likely not the intention behind clicking (delete).
        for schedule in data_root().schedules:
            for action in list(schedule.actions):
                if getattr(action, 'job_config', None) == self.config:
                    schedule.actions.remove(action)
        transaction.get().note('Deleted job config %s from client %s' % (self.config.oid, client.hostname))
        transaction.commit()
    return self.redirect_to()

def add_view(self, request):
    data = request.POST or None
    form = RetentionPolicy.Form(data)
    if data and form.is_valid():
        policy = RetentionPolicy(**form.cleaned_data)
        prune_root().policies.append(policy)
        transaction.get().note('Added prune retention policy %s' % policy.name)
        transaction.commit()
        # return redirect(prune_retention_policies)
    return self.render(request, 'core/prune/policy_add.html', {
        'form': form,
        'title': _('Add retention policy'),
        'submit': _('Add retention policy'),
    })

def view(self, request):
    data = request.POST or None
    self.policy._p_activate()
    form = RetentionPolicy.Form(data, initial=self.policy.__dict__)
    if data and form.is_valid():
        self.policy._update(form.cleaned_data)
        transaction.get().note('Edited prune retention policy %s' % self.policy.oid)
        transaction.commit()
        return self.parent.redirect_to()
    return self.render(request, 'core/prune/policy_add.html', {
        'form': form,
        'title': _('Edit retention policy'),
        'submit': _('Save changes'),
    })

def add_view(self, request):
    data = request.POST or None
    form = PruneConfig.Form(data)
    if data and form.is_valid():
        config = PruneConfig(**form.cleaned_data)
        prune_root().configs.append(config)
        transaction.get().note('Added prune config %s' % config.name)
        transaction.commit()
        return self.redirect_to()
    return self.render(request, 'core/prune/config_add.html', {
        'form': form,
        'title': _('Add prune configuration'),
        'submit': _('Add prune configuration'),
    })

def view(self, request):
    data = request.POST or None
    form = PruneConfig.Form(data, initial=self.config.__dict__)
    if data and form.is_valid():
        self.config._update(form.cleaned_data)
        transaction.get().note('Edited prune config %s' % self.config.oid)
        transaction.commit()
        return self.parent.redirect_to()
    return self.render(request, 'core/prune/config_add.html', {
        'form': form,
        'title': _('Edit prune configuration'),
        'submit': _('Edit prune configuration'),
    })

def add_view(self, request):
    data = request.POST or None
    repository_form = Repository.Form(data)
    if data and repository_form.is_valid():
        repository = Repository(**repository_form.cleaned_data)
        data_root().repositories.append(repository)
        transaction.get().note('Added repository %s' % repository.name)
        transaction.commit()
        return self.redirect_to()
    return self.render(request, 'core/repository/add.html', {
        'repository_form': repository_form,
    })

def edit_view(self, request):
    data = request.POST or None
    repository = self.repository
    repository._p_activate()
    repository_form = Repository.Form(data, initial=repository.__dict__)
    if data and repository_form.is_valid():
        repository._update(repository_form.cleaned_data)
        transaction.get().note('Edited repository %s' % repository.oid)
        transaction.commit()
        return self.redirect_to()
    return self.render(request, 'core/repository/edit.html', {
        'repository_form': repository_form,
    })

def add_view(self, request):
    data = request.POST or None
    config_form = CheckConfig.Form(data)
    if data and config_form.is_valid():
        config = CheckConfig(self.repository, **config_form.cleaned_data)
        self.repository.job_configs.append(config)
        transaction.get().note('Added check config to repository %s' % self.repository.oid)
        transaction.commit()
        return self.parent.redirect_to()
    return self.render(request, 'core/repository/config_add.html', {
        'form': config_form,
    })

def delete_view(self, request):
    if request.method == 'POST':
        repository = self.parent.repository
        repository.job_configs.remove(self.config)
        transaction.get().note('Deleted check config %s from repository %s' % (self.config.oid, repository.oid))
        transaction.commit()
    return self.parent.parent.redirect_to()

def __init__(self):
    self.metrics = PersistentList()
    from .builtin_metrics import ArchiveCount, TotalData, BackupsToday
    self.metrics.append(ArchiveCount())
    self.metrics.append(TotalData())
    self.metrics.append(BackupsToday())
    transaction.get().note('web: added default metrics')
    transaction.commit()

def synthesize_crypto(job):
    with open_repository(job.repository) as repository:
        if bin_to_hex(repository.id) != job.repository.repository_id:
            raise RepositoryIDMismatch(bin_to_hex(repository.id), job.repository.repository_id)
        manifest, key = Manifest.load(repository)
        client_key = synthesize_client_key(key, repository)
        if not isinstance(client_key, PlaintextKey):
            job.client_key_data = client_key.get_key_data()
            job.client_key_type = client_key.synthetic_type

        client_manifest = SyntheticManifest(client_key, repository.id)
        job.client_manifest_data = bin_to_hex(client_manifest.write())
        job.client_manifest_id_str = client_manifest.id_str
        transaction.get().note('Synthesized crypto for job %s' % job.id)
        transaction.commit()

def create_job(self):
    job = BackupJob(
        repository=self.repository,
        client=self.client,
        config=self,
    )
    transaction.get().note('Created backup job from check config %s on client %s' % (self.oid, self.client.hostname))
    log.info('Created job for client %s, job config %s', self.client.hostname, self.oid)

def create_job(self):
    job = PruneJob(config=self)
    transaction.get().note('Created prune job from config %s' % self.oid)
    log.info('Created prune job for config %s', self.oid)

def _add_checkpoint(self, id):
    self.job.checkpoint_archives.append(bin_to_hex(id))
    transaction.get().note('Added checkpoint archive %s for job %s' % (bin_to_hex(id), self.job.id))
    transaction.commit()

def get(self, id):
    """API"""
    repo_data = self.repository.get(id)
    client_data = self._repo_to_client(id, repo_data)
    return client_data

def _cache_sync_archive(self, archive_id):
    log.debug('Started cache sync')
    add_chunk = self._cache.chunks.add
    cdata = self._cache.repository.get(archive_id)
    _, data = self._cache.key.decrypt(archive_id, cdata)
    add_chunk(archive_id, 1, len(data), len(cdata))
    try:
        archive = ArchiveItem(internal_dict=msgpack.unpackb(data))
    except (TypeError, ValueError, AttributeError) as error:
        log.error('Corrupted/unknown archive metadata: %s', error)
        return False
    if archive.version != 1:
        log.error('Unknown archive metadata version %r', archive.version)
        return False
    unpacker = msgpack.Unpacker()
    for item_id, chunk in zip(archive.items, self._cache.repository.get_many(archive.items)):
        _, data = self._cache.key.decrypt(item_id, chunk)
        add_chunk(item_id, 1, len(data), len(chunk))
        unpacker.feed(data)
        for item in unpacker:
            if not isinstance(item, dict):
                log.error('Error: Did not get expected metadata dict - archive corrupted!')
                return False
            if b'chunks' in item:
                for chunk_id, size, csize in item[b'chunks']:
                    add_chunk(chunk_id, 1, size, csize)
    log.debug('Completed cache sync')
    return True

def test_celery__TransactionAwareTask__delay__6(celery_session_worker, zcml):
    """It allows overriding the principal."""
    auth = zope.component.getUtility(
        zope.authentication.interfaces.IAuthentication)
    principal = auth.getPrincipal('example.user')
    z3c.celery.celery.login_principal(principal)
    result = get_principal_title_task.delay(_principal_id_='zope.user')
    transaction.commit()
    assert 'User' == result.get()

def test_celery__TransactionAwareTask____call____1(celery_session_worker):
    """It aborts the transaction in case of an error during task execution."""
    result = exception_task.delay()
    transaction.commit()
    with pytest.raises(Exception) as err:
        result.get()
    # Celery wraps errors dynamically as celery.backends.base.<ErrorName>, so
    # we have to dig deep here.
    assert 'RuntimeError' == err.value.__class__.__name__

def conflict_task(bind=True, context=None, datetime=None):
    """Dummy task which injects a DataManager that votes a ConflictError."""
    transaction.get().join(VoteExceptionDataManager())

def test_celery__TransactionAwareTask____call____2(
        celery_session_worker, interaction):
    """It aborts the transaction and retries in case of a ConflictError."""
    result = conflict_task.delay()
    transaction.commit()
    with pytest.raises(Exception) as err:
        result.get()
    assert 'MaxRetriesExceededError' == err.value.__class__.__name__

def test_celery__TransactionAwareTask____call____3(
        celery_session_worker, zcml):
    """It runs as the given principal in asynchronous mode."""
    auth = zope.component.getUtility(
        zope.authentication.interfaces.IAuthentication)
    principal = auth.getPrincipal('example.user')
    z3c.celery.celery.login_principal(principal)
    result = get_principal_title_task.delay()
    transaction.commit()
    assert 'Ben Utzer' == result.get()

def test_celery__TransactionAwareTask____call____4(
        celery_session_worker, interaction):
    """It propagates the task_id to the worker."""
    job = get_task_id.apply_async(task_id='my-nice-task-id')
    transaction.commit()
    assert 'my-nice-task-id' == job.get()

def test_celery__TransactionAwareTask__run_in_worker__1(
        celery_session_worker, storage_file, interaction):
    """It handles specific exceptions in a new transaction after abort."""
    job = except_with_handler.delay()
    transaction.commit()
    with pytest.raises(Exception):
        job.get()
    with open_zodb_copy(storage_file) as app:
        assert [('data', ('a1', 'a2', 1, 4, u'User'))] == list(app.items())

def test_celery__TransactionAwareTask__run_in_worker__2(
        celery_session_worker, storage_file, interaction):
    """It handles specific exceptions that abort the transaction but still
    count as a successful job."""
    job = success_but_abort_transaction.delay()
    transaction.commit()
    assert 'done' == job.get()
    with open_zodb_copy(storage_file) as app:
        assert 'flub' not in app

def commit_error_task(bind=True):
    transaction.get().join(CommitExceptionDataManager())

def _join_transaction(self):
    if not self._needs_to_join:
        return
    dm = CeleryDataManager(self)
    transaction.get().join(dm)
    self._needs_to_join = False

def __repr__(self):
    """Custom repr."""
    return '<{0.__module__}.{0.__name__} for {1}, {2}>'.format(
        self.__class__, transaction.get(), self.session)

def update_item(storage, context):
    target_version = context.type_info.schema_version
    current_version = context.properties.get('schema_version', '')
    update = False
    errors = []
    properties = context.properties
    if target_version is None or current_version == target_version:
        unique_keys = context.unique_keys(properties)
        links = context.links(properties)
        keys_add, keys_remove = storage._update_keys(context.model, unique_keys)
        if keys_add or keys_remove:
            update = True
        rels_add, rels_remove = storage._update_rels(context.model, links)
        if rels_add or rels_remove:
            update = True
    else:
        properties = deepcopy(properties)
        upgrader = context.registry[UPGRADER]
        properties = upgrader.upgrade(
            context.type_info.name, properties, current_version, target_version,
            context=context, registry=context.registry)
        if 'schema_version' in properties:
            del properties['schema_version']
        schema = context.type_info.schema
        properties['uuid'] = str(context.uuid)
        validated, errors = validate(schema, properties, properties)
        # Do not send modification events to skip indexing
        context.update(validated)
        update = True
    return update, errors

def batch_upgrade(request):
    request.datastore = 'database'
    transaction.get().setExtendedInfo('upgrade', True)
    batch = request.json['batch']
    root = request.root
    storage = request.registry[STORAGE].write
    session = storage.DBSession()
    results = []
    for uuid in batch:
        item_type = None
        update = False
        error = False
        sp = session.begin_nested()
        try:
            item = find_resource(root, uuid)
            item_type = item.type_info.item_type
            update, errors = update_item(storage, item)
        except Exception as e:
            logger.error('Error %s updating: /%s/%s' % (e, item_type, uuid))
            sp.rollback()
            error = True
        else:
            if errors:
                # redmine 5161 sometimes error.path has an int
                errortext = [
                    '%s: %s' % ('/'.join([str(x) or '<root>' for x in error.path]), error.message)
                    for error in errors]
                logger.error(
                    'Validation failure: /%s/%s\n%s', item_type, uuid, '\n'.join(errortext))
                sp.rollback()
                error = True
            else:
                sp.commit()
        results.append((item_type, uuid, update, error))
    return {'results': results}

def includeme(config):
    registry = config.registry
    registry[STORAGE] = RDBStorage(registry[DBSESSION])
    global _DBSESSION
    _DBSESSION = registry[DBSESSION]
    if registry.settings.get('blob_bucket'):
        registry[BLOBS] = S3BlobStorage(
            registry.settings['blob_bucket'],
            read_profile_name=registry.settings.get('blob_read_profile_name'),
            store_profile_name=registry.settings.get('blob_store_profile_name'),
        )
    else:
        registry[BLOBS] = RDBBlobStorage(registry[DBSESSION])

def update(self, model, properties=None, sheets=None, unique_keys=None, links=None):
    session = self.DBSession()
    sp = session.begin_nested()
    try:
        session.add(model)
        self._update_properties(model, properties, sheets)
        if links is not None:
            self._update_rels(model, links)
        if unique_keys is not None:
            keys_add, keys_remove = self._update_keys(model, unique_keys)
        sp.commit()
    except (IntegrityError, FlushError):
        sp.rollback()
    else:
        return

    # Try again more carefully
    try:
        session.add(model)
        self._update_properties(model, properties, sheets)
        if links is not None:
            self._update_rels(model, links)
        session.flush()
    except (IntegrityError, FlushError):
        msg = 'UUID conflict'
        raise HTTPConflict(msg)
    assert unique_keys is not None
    conflicts = [pk for pk in keys_add if session.query(Key).get(pk) is not None]
    assert conflicts
    msg = 'Keys conflict: %r' % conflicts
    raise HTTPConflict(msg)

def get_blob(self, download_meta):
    blob_id = download_meta['blob_id']
    if isinstance(blob_id, str):
        blob_id = uuid.UUID(blob_id)
    session = self.DBSession()
    blob = session.query(Blob).get(blob_id)
    return blob.data

def __setitem__(self, key, value):
    current = self.data.get(key, None)
    if current is None:
        self.data[key] = current = CurrentPropertySheet(name=key, rid=self.rid)
    propsheet = PropertySheet(name=key, properties=value, rid=self.rid)
    current.propsheet = propsheet