我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用alembic.op.get_bind()。
def upgrade():
    """Add users.timezone and migrate its value out of the JSON settings blob."""
    bind = op.get_bind()
    session = Session(bind=bind)
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('timezone', sa.String(length=64),
                                     server_default='UTC', nullable=False))
    ### end Alembic commands ###
    key = 'timezone'
    for user in session.query(User):
        # settings is stored as a JSON string; may be empty/NULL.
        settings = json.loads(user.settings) if user.settings else {}
        if key in settings:
            # Move the value into the new column and strip it from the blob.
            user.timezone = settings[key]
            del settings[key]
            user.settings = json.dumps(settings)
        else:
            user.timezone = DEFAULT_TIMEZONE
    session.commit()
def upgrade():
    """Drop the 'n_answers' key from every task's JSON info blob."""
    task = table('task',
                 column('id'),
                 column('info')
                 )
    conn = op.get_bind()
    query = select([task.c.id, task.c.info])
    tasks = conn.execute(query)
    update_values = []
    for row in tasks:
        info_data = row.info
        info_dict = json.loads(info_data)
        # Only rows that actually carry the key need rewriting.
        if info_dict.get('n_answers'):
            del info_dict['n_answers']
            update_values.append({'task_id': row.id,
                                  'new_info': json.dumps(info_dict)})
    # executemany-style bulk update keyed by bindparam placeholders.
    task_update = task.update().\
        where(task.c.id == bindparam('task_id')).\
        values(info=bindparam('new_info'))
    if len(update_values) > 0:
        conn.execute(task_update, update_values)
def downgrade():
    """Restore the 'n_answers' key inside each task's JSON info blob."""
    task = table('task', column('id'), column('info'), column('n_answers'))
    conn = op.get_bind()
    rows = conn.execute(select([task.c.id, task.c.info, task.c.n_answers]))
    pending = []
    for row in rows:
        info = json.loads(row.info)
        info['n_answers'] = row.n_answers
        pending.append({'task_id': row.id, 'new_info': json.dumps(info)})
    if pending:
        stmt = (task.update()
                .where(task.c.id == bindparam('task_id'))
                .values(info=bindparam('new_info')))
        conn.execute(stmt, pending)
def upgrade():
    """Tighten NOT NULL constraints; enforce one default kube via a partial index."""
    op.alter_column('kubes', 'name', existing_type=sa.VARCHAR(length=64),
                    nullable=False)
    # Partial unique index: at most one row may have is_default IS true.
    op.create_index('one_default', 'kubes', ['is_default'], unique=True,
                    postgresql_where=sa.text(u'kubes.is_default IS true'))
    op.drop_constraint(u'kubes_is_default_key', 'kubes', type_='unique')
    op.alter_column('packages', 'name', existing_type=sa.VARCHAR(length=64),
                    nullable=False)
    op.alter_column('packages', 'prefix', existing_type=sa.VARCHAR(),
                    nullable=False)
    op.alter_column('packages', 'suffix', existing_type=sa.VARCHAR(),
                    nullable=False)
    session = Session(bind=op.get_bind())
    # Remove orphaned link rows before making the FK columns NOT NULL below.
    session.query(PackageKube).filter(sa.or_(
        PackageKube.package_id.is_(None),
        PackageKube.kube_id.is_(None),
    )).delete()
    session.commit()
    op.alter_column('package_kube', 'kube_id', existing_type=sa.INTEGER(),
                    nullable=False)
    op.alter_column('package_kube', 'package_id', existing_type=sa.INTEGER(),
                    nullable=False)
def upgrade():
    """Add connection.is_encrypted, skipping databases that already have it.

    Users upgrading from a pre-Alembic Airflow may already have the column;
    in that case the migration is a no-op.
    """
    conn = op.get_bind()
    inspector = Inspector.from_engine(conn)
    # 'connection' pre-exists only when the table predates Alembic (or was
    # created by an earlier migration); if the column is there, bail out.
    if 'connection' in inspector.get_table_names():
        col_names = [c['name'] for c in inspector.get_columns('connection')]
        if 'is_encrypted' in col_names:
            return
    op.add_column(
        'connection',
        sa.Column('is_encrypted', sa.Boolean, unique=False, default=False))
    # Backfill: mark every existing row as not encrypted.
    # (Fixed: the original re-fetched op.get_bind() although `conn` was
    # already bound above.)
    conn.execute(
        connectionhelper.update().values(is_encrypted=False)
    )
def upgrade():
    """Add a GIN-indexed search_vector column to each searchable table and
    install the tsvector sync triggers."""
    searchable_columns = [
        ('law', ['content']),
        ('page', ['title', 'content']),
        ('post', ['content']),
        ('proposal', ['description']),
        ('topic', ['name', 'description']),
        ('user', ['username']),
    ]
    # DDL first: one nullable TSVector column plus a GIN index per table.
    for table_name, _ in searchable_columns:
        op.add_column(table_name, sa.Column(
            'search_vector',
            sqlalchemy_utils.types.ts_vector.TSVectorType(),
            nullable=True))
        op.create_index('ix_%s_search_vector' % table_name, table_name,
                        ['search_vector'], unique=False,
                        postgresql_using='gin')
    # Then wire up the triggers that keep search_vector in sync.
    conn = op.get_bind()
    for table_name, columns in searchable_columns:
        sync_trigger(conn, table_name, 'search_vector', columns)
def upgrade():
    """Backfill first_ip/last_ip (IPv6-normalized) for every IP policy CIDR."""
    ip_policy_cidrs = table('quark_ip_policy_cidrs',
                            column('id', sa.String(length=36)),
                            column('first_ip', INET()),
                            column('last_ip', INET()),
                            column('cidr', sa.String(length=64)))
    connection = op.get_bind()
    # 1. Retrieve all ip_policy_cidr rows.
    results = connection.execute(
        select([ip_policy_cidrs.c.id, ip_policy_cidrs.c.cidr])
    ).fetchall()
    # 2. Populate first_ip, last_ip for each IP Policy CIDR.
    for ippc in results:
        # Normalize to IPv6 so IPv4 and IPv6 CIDRs share one representation.
        net = netaddr.IPNetwork(ippc["cidr"]).ipv6()
        connection.execute(ip_policy_cidrs.update().values(
            first_ip=net.first, last_ip=net.last).where(
            ip_policy_cidrs.c.id == ippc["id"]))
def upgrade(): bind = op.get_bind() # NOTE(sileht): mysql can't delete an index on a foreign key # even this one is not the index used by the foreign key itself... # In our case we have two indexes fk_resource_history_id_resource_id and # and ix_resource_history_id, we want to delete only the second, but mysql # can't do that with a simple DROP INDEX ix_resource_history_id... # so we have to remove the constraint and put it back... if bind.engine.name == "mysql": op.drop_constraint("fk_resource_history_id_resource_id", type_="foreignkey", table_name="resource_history") for table, colname in resource_tables + history_tables + other_tables: op.drop_index("ix_%s_%s" % (table, colname), table_name=table) if bind.engine.name == "mysql": op.create_foreign_key("fk_resource_history_id_resource_id", "resource_history", "resource", ["id"], ["id"], ondelete="CASCADE")
def upgrade(): """Upgrade the database to a newer revision.""" # ### commands auto generated by Alembic - please adjust! ### # See https://bitbucket.org/zzzeek/alembic/issues/123/a-way-to-run-non-transactional-ddl connection = None if not op.get_context().as_sql: connection = op.get_bind() connection.execution_options(isolation_level='AUTOCOMMIT') op.execute("ALTER TYPE ecosystem_backend_enum ADD VALUE 'nuget'") op.execute("INSERT INTO ecosystems VALUES " "('{id}', '{name}', '{backend}', '{url}', '{fetch_url}')". format(id=8, name='nuget', backend='nuget', url='https://nuget.org/', fetch_url='https://api.nuget.org/packages/')) if connection is not None: connection.execution_options(isolation_level='READ_COMMITTED') # ### end Alembic commands ###
def downgrade():
    """Convert UTCDateTime(timezone=True) columns back to plain DateTime.

    SQLite stores these as plain text already, so it is skipped.
    """
    connection = op.get_bind()
    if connection.engine.dialect.name != "sqlite":
        # (table, column) pairs, grouped by the model file defining them.
        datetime_columns = [
            # user/models.py
            ('users', 'date_joined'),
            ('users', 'lastseen'),
            ('users', 'birthday'),
            ('users', 'last_failed_login'),
            # message/models.py
            ('conversations', 'date_created'),
            ('messages', 'date_created'),
            # forum/models.py
            ('topicsread', 'last_read'),
            ('forumsread', 'last_read'),
            ('forumsread', 'cleared'),
            ('reports', 'reported'),
            ('reports', 'zapped'),
            ('posts', 'date_created'),
            ('posts', 'date_modified'),
            ('topics', 'date_created'),
            ('topics', 'last_updated'),
            ('forums', 'last_post_created'),
        ]
        for table_name, column_name in datetime_columns:
            op.alter_column(
                table_name, column_name,
                type_=sa.DateTime(timezone=False),
                existing_type=flaskbb.utils.database.UTCDateTime(timezone=True),
                existing_nullable=True)
def upgrade():
    """Add connection.is_encrypted unless a pre-Alembic database already has it."""
    # Users upgrading from an Airflow version that predates Alembic may
    # already have the table and column; detect that case and do nothing.
    inspector = Inspector.from_engine(settings.engine)
    if 'connection' in inspector.get_table_names():
        existing = [col['name'] for col in inspector.get_columns('connection')]
        if 'is_encrypted' in existing:
            return
    op.add_column(
        'connection',
        sa.Column('is_encrypted', sa.Boolean, unique=False, default=False))
    # Backfill existing rows as not encrypted.
    op.get_bind().execute(
        connectionhelper.update().values(is_encrypted=False))
def upgrade(): conn = op.get_bind() # ### commands auto generated by Alembic - please adjust! ### op.add_column('projects', sa.Column('task_creation_mode', sa.Integer(), nullable=True)) op.create_index('idx_geometry', 'projects', ['geometry'], unique=False, postgresql_using='gist') op.add_column('tasks', sa.Column('extra_properties', sa.Unicode(), nullable=True)) for project in conn.execute(projects.select()): zooms = conn.execute( sa.sql.expression.select([tasks.c.zoom]).distinct(tasks.c.zoom) .where(tasks.c.project_id == project.id)) zooms = zooms.fetchall() if len(zooms) == 1 and zooms[0] == (None,): op.execute( projects.update().where(projects.c.id == project.id) .values(task_creation_mode=1)) # ### end Alembic commands ###
def upgrade():
    """Create the pokemon table and install its search_vector sync trigger."""
    columns = [
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.Text, unique=True, nullable=False),
        sa.Column('flavor_text', sa.Text, nullable=False),
        sa.Column('habitat', sa.Text, nullable=True, default=None),
        sa.Column('color', sa.Text, nullable=False),
        sa.Column('shape', sa.Text, nullable=False),
        sa.Column('search_vector', TSVectorType(searchable)),
        sa.Column('inserted_at', sa.DateTime,
                  default=sa.func.current_timestamp(), nullable=False),
        sa.Column('updated_at', sa.DateTime,
                  default=sa.func.current_timestamp(),
                  onupdate=sa.func.current_timestamp(), nullable=False),
    ]
    op.create_table('pokemon', *columns)
    # Keep search_vector in sync with the searchable columns.
    sync_trigger(op.get_bind(), 'pokemon', 'search_vector', searchable)
def upgrade():
    """Create the pokedexes table and install its search_vector sync trigger."""
    columns = [
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.Text, unique=True, nullable=False),
        sa.Column('official_name', sa.Text, unique=True, nullable=False),
        sa.Column('region', sa.Text, nullable=True, default=None),
        sa.Column('description', sa.Text, nullable=True, default=None),
        sa.Column('search_vector', TSVectorType(searchable)),
        sa.Column('inserted_at', sa.DateTime,
                  default=sa.func.current_timestamp(), nullable=False),
        sa.Column('updated_at', sa.DateTime,
                  default=sa.func.current_timestamp(),
                  onupdate=sa.func.current_timestamp(), nullable=False),
    ]
    op.create_table('pokedexes', *columns)
    # Keep search_vector in sync with the searchable columns.
    sync_trigger(op.get_bind(), 'pokedexes', 'search_vector', searchable)
def upgrade():
    """Create the moves table and install its search_vector sync trigger."""
    columns = [
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.Text, unique=True, nullable=False),
        sa.Column('flavor_text', sa.Text, nullable=True, default=None),
        sa.Column('short_effect', sa.Text, nullable=False),
        sa.Column('effect', sa.Text, nullable=False),
        sa.Column('damage_class', sa.Text, nullable=True, default=None),
        sa.Column('power_points', sa.Integer, nullable=True, default=None),
        sa.Column('power', sa.Integer, nullable=True, default=None),
        sa.Column('accuracy', sa.Integer, nullable=True, default=None),
        sa.Column('search_vector', TSVectorType(searchable)),
        sa.Column('inserted_at', sa.DateTime,
                  default=sa.func.current_timestamp(), nullable=False),
        sa.Column('updated_at', sa.DateTime,
                  default=sa.func.current_timestamp(),
                  onupdate=sa.func.current_timestamp(), nullable=False),
    ]
    op.create_table('moves', *columns)
    # Keep search_vector in sync with the searchable columns.
    sync_trigger(op.get_bind(), 'moves', 'search_vector', searchable)
def upgrade():
    """Install set_meta_updated() and a BEFORE UPDATE trigger on each table."""
    bind = op.get_bind()
    # Trigger function: stamp meta_updated with now() on every UPDATE.
    bind.execute(sa.DDL(
        """CREATE FUNCTION set_meta_updated() RETURNS TRIGGER LANGUAGE plpgsql AS $$ BEGIN NEW.meta_updated := now(); RETURN NEW; END; $$;"""))
    for table in updatable_tables:
        params = {'trigger': '%s_set_meta_updated' % table, 'table': table}
        bind.execute(
            """CREATE TRIGGER %(trigger)s BEFORE UPDATE ON %(table)s FOR EACH ROW EXECUTE PROCEDURE set_meta_updated();"""
            % params)
def upgrade(): bind = op.get_bind() ### commands auto generated by Alembic - please adjust! ### op.add_column('exercises', sa.Column('chapter_id', sa.Integer(), nullable=True)) op.add_column('exercises', sa.Column('section_id', sa.Integer(), nullable=True)) op.add_column('exercises', sa.Column('book_row_id', sa.Integer(), nullable=True)) op.add_column('subjects', sa.Column('book_url', sa.String(), nullable=True)) ### end Alembic commands ### data = [ {'id': 1, 'book_url': 'https://staging-tutor.cnx.org/contents/d52e93f4-8653-4273-86da-3850001c0786'}, {'id': 2, 'book_url': 'https://staging-tutor.cnx.org/contents/334f8b61-30eb-4475-8e05-5260a4866b4b'} ] for item in data: update = sa.update(subject_table)\ .where(subject_table.c.id == item['id'])\ .values(dict(book_url=item['book_url'])) bind.execute(update)
def upgrade():
    """Add indexed comment_count columns and backfill them from comments."""
    # ### commands auto generated by Alembic - please adjust! ###
    for site in ('nyaa', 'sukebei'):
        table_name = '%s_torrents' % site
        op.add_column(table_name,
                      sa.Column('comment_count', sa.Integer(), nullable=False))
        op.create_index(op.f('ix_%s_comment_count' % table_name), table_name,
                        ['comment_count'], unique=False)
    # ### end Alembic commands ###
    connection = op.get_bind()
    for site in ('nyaa', 'sukebei'):
        print('Updating comment counts on %s_torrents...' % site)
        connection.execute(sa.sql.text(COMMENT_UPDATE_SQL.format(site)))
        print('Done.')
def _get_ehlv_class():
    """Return the automapped EnvironmentHierarchyLevelValue class."""
    prefix = context.config.get_main_option('table_prefix')
    auto_base = _get_autobase(prefix, op.get_bind())
    return auto_base.classes.EnvironmentHierarchyLevelValue
def _get_session():
    """Create an autocommitting ORM session bound to the migration connection."""
    bind = op.get_bind()
    return sa.orm.Session(bind=bind, autocommit=True)
def upgrade():
    """Create message_blacklist with a unique per-login index.

    (Removed the unused `context`/`connection` locals the original assigned
    but never read.)
    """
    op.create_table(
        'message_blacklist',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('login_id', sa.BigInteger(), nullable=False),
        sa.Column('blacklist', postgresql.ARRAY(sa.Integer)),
        sa.ForeignKeyConstraint(
            ['login_id'], ['login.id'], ondelete='CASCADE',
            name="ref_message_blacklist_login_id_to_login"),
        sa.PrimaryKeyConstraint('id')
    )
    # unique=True: each login may have at most one blacklist row.
    op.create_index(op.f('idx_message_blacklist_login_id'),
                    'message_blacklist', ['login_id'], unique=True)
def upgrade():
    """Add a GIN-indexed search_vector column to episode with a sync trigger."""
    bind = op.get_bind()
    op.add_column('episode', sa.Column(
        'search_vector',
        sqlalchemy_utils.types.ts_vector.TSVectorType(),
        nullable=True))
    op.create_index('ix_episode_search_vector', 'episode', ['search_vector'],
                    unique=False, postgresql_using='gin')
    # Keep search_vector synchronized with title and description.
    sync_trigger(bind, 'episode', 'search_vector', ['title', 'description'])
def get_bind(self):
    """Return the current 'bind'.

    Under normal circumstances, this is the
    :class:`~sqlalchemy.engine.Connection` currently being used
    to emit SQL to the database.

    In a SQL script context, this value is ``None``. [TODO: verify this]
    """
    migration_context = self.migration_context
    return migration_context.impl.bind
def upgrade():
    """Add kubes.is_default and mark the lowest-id kube as the default."""
    bind = op.get_bind()
    session = Session(bind=bind)
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('kubes', sa.Column('is_default', sa.Boolean(), nullable=True))
    op.create_unique_constraint(None, 'kubes', ['is_default'])
    ### end Alembic commands ###
    # The first (lowest non-negative id) kube becomes the default, if any.
    kube = session.query(Kube).filter(Kube.id >= 0).order_by(Kube.id).first()
    if kube is not None:
        kube.is_default = True
        session.commit()
def upgrade():
    """Add pod_states.kube_id and backfill it from each pod's kube."""
    op.add_column('pod_states',
                  sa.Column('kube_id', sa.Integer(), nullable=True))
    session = sa.orm.sessionmaker()(bind=op.get_bind())
    # Copy every pod's kube_id onto all of its recorded states.
    for pod_id, kube_id in session.query(Pod.id, Pod.kube_id):
        session.query(PodState).filter_by(pod_id=pod_id).update(
            {'kube_id': kube_id})
    session.commit()
    # All rows are populated now, so NOT NULL can be enforced.
    op.alter_column('pod_states', 'kube_id', nullable=False)
def upgrade():
    """Default packages.count_type to 'fixed' and make it NOT NULL."""
    conn = op.get_bind()
    q = conn.execute("SELECT count_type FROM packages WHERE name='Standard package'")
    r = q.fetchall()
    # Only patch the standard package when it exists and has no count_type yet.
    if len(r) and len(r[0]) and r[0][0] is None:
        conn.execute("UPDATE packages SET count_type='fixed' WHERE name='Standard package'")
    op.alter_column('packages', 'count_type', nullable=False, server_default='fixed')
def upgrade():
    """Rework system_settings: add label/description/placeholder, seed defaults."""
    session = Session(bind=op.get_bind())
    op.drop_column('system_settings', 'created')
    op.drop_column('system_settings', 'deleted')
    op.add_column('system_settings', sa.Column('label', sa.Text, nullable=True))
    op.add_column('system_settings', sa.Column('description', sa.Text, nullable=True))
    op.add_column('system_settings', sa.Column('placeholder', sa.String, nullable=True))
    # Keep only the newest billing-link row; older rows are historical copies.
    billing_link = session.query(SystemSettings).filter_by(name='billing_apps_link').order_by(SystemSettings.id.desc()).first()
    if billing_link is not None:
        last = billing_link.id
        session.query(SystemSettings).filter(SystemSettings.id != last).delete()
        billing_link.label = 'Link to billing system script'
        billing_link.description = 'Link to predefined application request processing script'
        billing_link.placeholder = 'http://whmcs.com/script.php'
    else:
        bl = SystemSettings(name='billing_apps_link',
                            label='Link to billing system script',
                            description='Link to predefined application request processing script',
                            placeholder='http://whmcs.com/script.php')
        session.add(bl)
    # NOTE: 'persitent_disk_max_size' is misspelled in the database; kept as-is
    # because other code looks the setting up by this exact name.
    pd = SystemSettings(name='persitent_disk_max_size', value='10',
                        label='Persistent disk maximum size',
                        description='maximum capacity of a user container persistent disk in GB',
                        placeholder='Enter value to limit PD size')
    session.add(pd)
    ms = SystemSettings(name='default_smtp_server',
                        label='Default SMTP server',
                        description='Default SMTP server',
                        placeholder='Default SMTP server')
    session.add(ms)
    session.commit()
    # Safe now that duplicate-name history rows were removed above.
    op.create_unique_constraint('uq_system_settings_name', 'system_settings', ['name'])
def upgrade():
    """Add packages.is_default and flag the oldest package as the default."""
    op.add_column('packages',
                  sa.Column('is_default', sa.Boolean(), nullable=True))
    op.create_unique_constraint(None, 'packages', ['is_default'])
    # The package with the smallest id becomes the default one.
    op.get_bind().execute(
        "UPDATE packages SET is_default=true WHERE id in (SELECT MIN(id) FROM packages)")
def downgrade_data():
    """Restore ContainerState.pod_id from the linked PodState row."""
    session = Session(bind=op.get_bind())
    for state in session.query(ContainerState).all():
        pod_state = session.query(PodState).get(state.pod_state_id)
        state.pod_id = pod_state.pod_id
    session.commit()
def upgrade():
    """Move the Pods menu to '/pods/' and hide Settings from non-admin roles."""
    session = Session(bind=op.get_bind())
    pods = session.query(MenuItem).filter(MenuItem.name == 'Pods').one()
    pods.path = '/pods/'
    admin = session.query(Role).filter(Role.rolename == 'Admin').one()
    # Drop the Settings menu entry for every role except Admin.
    for i in session.query(MenuItemRole).filter(MenuItemRole.role != admin):
        if i.menuitem.name == 'Settings':
            session.delete(i)
    session.commit()
def downgrade():
    """Restore the Pods menu path and re-grant Settings to the user roles."""
    session = Session(bind=op.get_bind())
    pods = session.query(MenuItem).filter(MenuItem.name == 'Pods').one()
    pods.path = '/'
    setts = session.query(MenuItem).filter(MenuItem.name == 'Settings').first()
    if setts is not None:
        # Re-create the menu/role links for the regular user roles.
        session.add_all(MenuItemRole(role=role, menuitem=setts)
                        for role in session.query(Role).filter(
                            Role.rolename.in_(['User', 'TrialUser'])))
    session.commit()
def upgrade():
    """Add persistent_disk.state, defaulting every row to 0."""
    op.add_column(
        'persistent_disk',
        sa.Column('state', sa.Integer(), nullable=False, server_default='0')
    )
    # Explicit backfill mirroring the server default.
    op.get_bind().execute("UPDATE persistent_disk SET state=0")
def upgrade():
    """Rename kubes.total_traffic to included_traffic (add, copy, drop)."""
    op.add_column('kubes', sa.Column('included_traffic', sa.Integer(),
                                     server_default='0', nullable=False))
    bind = op.get_bind()
    # Copy the old values before the source column disappears.
    bind.execute("UPDATE kubes SET included_traffic=total_traffic")
    op.drop_column('kubes', 'total_traffic')
def upgrade():
    """Add container_states.docker_id to the primary key and backfill it.

    The docker_id backfill from kubes-api is best-effort: a failure is
    logged but does not abort the migration.
    """
    bind = op.get_bind()
    session = Session(bind=bind)
    op.add_column(
        'container_states',
        sa.Column('docker_id', sa.String(length=80), server_default='unknown',
                  nullable=False)
    )
    op.execute("ALTER TABLE container_states DROP CONSTRAINT container_states_pkey, "
               "ADD CONSTRAINT container_states_pkey PRIMARY KEY "
               "(pod_id, container_name, docker_id, kubes, start_time);")
    try:
        # Try to get docker_id for current container states.
        # 1. Extract all DB states without end time
        # 2. Get pods information for those containers from kubes-api
        # 3. Set docker_id for selected DB-states
        states = session.query(ContainerState).filter(
            ContainerState.end_time == None)
        containers = {
            (item.pod_id, item.container_name): item for item in states
        }
        container_ids = _get_container_ids()
        # BUG FIX: iterate items(). The original `for key, state in containers:`
        # unpacked each (pod_id, container_name) KEY tuple into (key, state),
        # so docker_id was never assigned to a ContainerState row.
        for key, state in containers.items():
            if key not in container_ids:
                continue
            state.docker_id = container_ids[key]
        session.commit()
    except Exception as err:
        # We will not break the migration in case of failed update of
        # some docker_id fields. Just warn about it.
        logger.warning(
            u'Failed to set actual docker_id for currently running containers: %s',
            err)
def upgrade():
    """Seed the CLN_NOTIFICATION info notification."""
    session = Session(bind=op.get_bind())
    session._model_changes = False  # workaround for Flask-SQLAlchemy
    notification = Notification(type='info', message='CLN_NOTIFICATION',
                                description='')
    session.add(notification)
    session.commit()
def downgrade():
    """Remove the CLN_NOTIFICATION notification if present."""
    session = Session(bind=op.get_bind())
    session._model_changes = False  # workaround for Flask-SQLAlchemy
    notification = session.query(Notification).filter_by(
        message='CLN_NOTIFICATION').first()
    if notification is not None:
        session.delete(notification)
        session.commit()
def upgrade():
    """Switch the zero-id (default) package to monthly billing."""
    op.get_bind().execute("UPDATE packages SET period='month' WHERE id=0")
def downgrade():
    """Revert the zero-id (default) package to hourly billing."""
    op.get_bind().execute("UPDATE packages SET period='hour' WHERE id=0")
def upgrade():
    """Grant the TrialUser role permission to create pods."""
    session = sa.orm.sessionmaker()(bind=op.get_bind())
    query = (session.query(Permission)
             .join(Role, Role.id == Permission.role_id)
             .join(Resource, Permission.resource_id == Resource.id)
             .filter(Role.rolename == 'TrialUser')
             .filter(Resource.name == 'pods')
             .filter(Permission.name == 'create'))
    permission = query.one()
    permission.allow = True
    session.commit()
def downgrade():
    """Revoke the TrialUser role's permission to create pods."""
    session = sa.orm.sessionmaker()(bind=op.get_bind())
    query = (session.query(Permission)
             .join(Role, Role.id == Permission.role_id)
             .join(Resource, Permission.resource_id == Resource.id)
             .filter(Role.rolename == 'TrialUser')
             .filter(Resource.name == 'pods')
             .filter(Permission.name == 'create'))
    permission = query.one()
    permission.allow = False
    session.commit()
def upgrade():
    """Drop predefined-app ownership, add pod plan name, unique permission
    triple, and a system_settings grouping column.

    (Removed the unused `conn = op.get_bind()` the original assigned but
    never read.)
    """
    op.drop_column('predefined_apps', 'user_id')
    op.add_column('pods', sa.Column(
        'template_plan_name', sa.String(24), nullable=True))
    # One permission row per (resource, role, name) triple.
    op.create_unique_constraint('resource_role_name_unique', 'rbac_permission',
                                ['resource_id', 'role_id', 'name'])
    op.add_column(
        'system_settings',
        sa.Column('setting_group', sa.Text, default=''))
def downgrade():
    """Reverse of upgrade: restore user_id, drop the added columns/constraint.

    (Removed the unused `conn = op.get_bind()` the original assigned but
    never read.)
    """
    # server_default='1' so existing rows satisfy NOT NULL immediately.
    op.add_column('predefined_apps', sa.Column(
        'user_id', sa.Integer, sa.ForeignKey('users.id'), nullable=False,
        server_default='1'))
    op.drop_column('pods', 'template_plan_name')
    op.drop_constraint('resource_role_name_unique', 'rbac_permission')
    op.drop_column('system_settings', 'setting_group')
def downgrade():
    """Collapse predefined_app_templates back into predefined_apps.template."""
    bind = op.get_bind()
    Base.metadata.bind = bind
    ### commands auto generated by Alembic - please adjust! ###
    # template is re-added nullable first so the data copy below can run.
    op.add_column(u'predefined_apps', sa.Column('template', sa.TEXT(), nullable=True))
    op.drop_column(u'predefined_apps', 'is_deleted')
    downgrade_data(bind)
    op.alter_column(u'predefined_apps', u'template', nullable=False)
    op.drop_column(u'pods', 'template_version_id')
    op.drop_table('predefined_app_templates')
    ### end Alembic commands ###
def upgrade():
    """Rebuild session_data: wipe rows, drop the blob, add user/role ids."""
    bind = op.get_bind()
    # Existing sessions cannot be carried over to the new shape; discard them.
    bind.execute("DELETE FROM session_data")
    op.drop_column('session_data', 'data')
    op.add_column('session_data',
                  sa.Column('user_id', sa.Integer(), nullable=False))
    op.add_column('session_data',
                  sa.Column('role_id', sa.Integer(), nullable=False))
def upgrade():
    """Store kube disk space in MB instead of bytes."""
    op.add_column('kubes', sa.Column('disk_space_units', sa.String(3),
                                     server_default='MB', nullable=False))
    session = sa.orm.sessionmaker()(bind=op.get_bind())
    for kube in session.query(Kube).all():
        # Convert bytes -> MB. NOTE(review): `/=` floors only on Python 2 ints;
        # under Python 3 it yields a float — confirm the target interpreter
        # before reusing this snippet.
        kube.disk_space /= 2 ** 20
    session.commit()
def downgrade():
    """Convert kube disk space back from MB to bytes, drop the units column."""
    db = sa.orm.sessionmaker()(bind=op.get_bind())
    for row in db.query(Kube).all():
        row.disk_space = row.disk_space * (2 ** 20)
    db.commit()
    op.drop_column('kubes', 'disk_space_units')
def upgrade():
    """Add pods.template_id; ensure the internal user has credentials."""
    op.add_column('pods', sa.Column('template_id', sa.Integer(), nullable=True))
    bind = op.get_bind()
    session = sessionmaker()(bind=bind)
    ku = session.query(User).filter(
        User.username == KUBERDOCK_INTERNAL_USER
    ).first()
    if not ku:
        logger.warning('Internal user not found: %s', KUBERDOCK_INTERNAL_USER)
        return
    # Older installs may have the internal user without a password/token.
    if not ku.password_hash:
        ku.password = uuid.uuid4().hex
        ku.get_token()
        session.commit()
def upgrade():
    """Add a per-row qualifier (SHA1 of the current time) to predefined apps."""
    op.add_column('predefined_apps',
                  sa.Column('qualifier', sa.String(40), nullable=True,
                            index=True))
    # Lightweight Core table: only the column we write is declared.
    Pa = sa.Table('predefined_apps', sa.MetaData(), sa.Column('qualifier'))
    session = sa.orm.Session(bind=op.get_bind())
    for pa in session.query(Pa):
        sha = sha1()
        # FIX: hashlib.update() requires bytes on Python 3; encode the
        # timestamp string (also valid on Python 2).
        sha.update(str(datetime.now()).encode('utf-8'))
        pa.qualifier = sha.hexdigest()
    session.commit()
    # All rows populated; enforce NOT NULL with an empty-string default.
    op.alter_column('predefined_apps', 'qualifier', server_default='',
                    nullable=False)
def upgrade():
    """Insert the can_edit_own_password permission if it does not exist yet."""
    bind = op.get_bind()
    # Idempotent insert: the WHERE NOT EXISTS guard skips existing rows.
    bind.execute(text(""" INSERT INTO "Permission" (name, default_value, course_permission) SELECT 'can_edit_own_password', true, false WHERE NOT EXISTS (SELECT 1 FROM "Permission" WHERE name = 'can_edit_own_password') """))
def upgrade():
    """Fold the separate extension column into File.name, then drop it."""
    conn = op.get_bind()
    # Append '.' + extension to name for files (not directories) with one.
    conn.execute(text(""" UPDATE "File" SET name = CASE WHEN extension = '' THEN name ELSE name || '.' || extension END WHERE is_directory = false; """))
    op.drop_column('File', 'extension')