The following code examples, extracted from open-source Python projects, illustrate how MySQLdb.OperationalError() is used in practice.
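Before the project examples, here is a minimal sketch of the pattern most of them share: run a query, catch MySQLdb.OperationalError (lost connections, timeouts, access problems and similar connection/server-level failures), and either reconnect and retry or re-raise. The function name, connection parameters, and retry policy below are illustrative assumptions, not taken from any of the projects.

import time
import MySQLdb

def query_with_retry(conn_kwargs, sql, retries=3, delay=1.0):
    """Run a query, retrying on MySQLdb.OperationalError (hypothetical helper)."""
    last_error = None
    for attempt in range(retries):
        try:
            # conn_kwargs is assumed to hold e.g. host, user, passwd, db.
            conn = MySQLdb.connect(**conn_kwargs)
            try:
                cursor = conn.cursor()
                cursor.execute(sql)
                return cursor.fetchall()
            finally:
                conn.close()
        except MySQLdb.OperationalError as e:
            # OperationalError covers lost connections, timeouts and other
            # connection/server-level failures that are often worth retrying.
            last_error = e
            time.sleep(delay)
    raise last_error

The project examples below vary this basic pattern: some retry only on specific error codes (for example 2006, "MySQL server has gone away"), some reconnect exactly once, and some simply translate the error into an application-specific exception.
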
def apply_migration(self, name, statements, fake=False):
    import psycopg2
    connection = self.get_connection()
    with connection.cursor() as cursor:
        try:
            if not fake and statements.strip():
                cursor.execute(statements)
        except (psycopg2.OperationalError,
                psycopg2.ProgrammingError) as e:
            connection.rollback()
            print(e.message)
            from sqlibrist.helpers import ApplyMigrationFailed
            raise ApplyMigrationFailed
        else:
            cursor.execute('INSERT INTO sqlibrist.migrations '
                           '(migration) VALUES (%s);',
                           [name.split('/')[-1]])
            connection.commit()

def install_server(db):
    db.socket = str(db.installation_dir.join('mysql.sock'))
    db.installation_dir.ensure_dir('tmp')
    mycnf = db.installation_dir.join('my.cnf')
    mycnf.write('[mysqld]\n'
                'datadir=%(installation_dir)s\n'
                #'log\n'
                #'log-error\n'
                'skip-networking\n'
                'socket=mysql.sock\n'
                'pid-file=mysqld.pid\n'
                'tmpdir=tmp\n' % {'installation_dir': db.installation_dir})
    subprocess.check_call(['mysql_install_db', '--defaults-file=' + str(mycnf)])
    server = subprocess.Popen(['mysqld', '--defaults-file=' + str(mycnf)])
    import time, MySQLdb
    tries = 30
    for t in range(tries):
        try:
            with db.root() as cur:
                cur.execute("CREATE USER 'abe'@'localhost' IDENTIFIED BY 'Bitcoin'")
            return server
        except MySQLdb.OperationalError as e:
            if t+1 == tries:
                raise e
            time.sleep(1)

def install_server(db):
    db.bindir = subprocess.Popen(['pg_config', '--bindir'],
                                 stdout=subprocess.PIPE).communicate()[0].rstrip()
    subprocess.check_call([
        os.path.join(db.bindir, 'initdb'),
        '-D', str(db.installation_dir),
        '-U', 'postgres'])
    server = subprocess.Popen([
        os.path.join(db.bindir, 'postgres'),
        '-D', str(db.installation_dir),
        '-c', 'listen_addresses=',
        '-c', 'unix_socket_directory=.'])
    import time, psycopg2
    tries = 30
    for t in range(tries):
        try:
            with db.root() as cur:
                cur.execute("COMMIT")  # XXX
                cur.execute("CREATE USER abe UNENCRYPTED PASSWORD 'Bitcoin'")
                cur.execute("COMMIT")
            return server
        except psycopg2.OperationalError as e:
            if t+1 == tries:
                raise e
            time.sleep(1)

def query(self, sql, use_dict=True, retry=0):
    if retry < 0:
        retry = 0
    retry = int(retry)

    # the first attempt does not count as 'retry'
    for i in range(retry + 1):
        try:
            with self() as conn:
                return conn_query(conn, sql, use_dict=use_dict)
        except MySQLdb.OperationalError as e:
            if len(e.args) > 0 and e.args[0] in retriable_err:
                logger.info(
                    repr(e) + " conn_query error {sql}".format(sql=sql))
                continue
            else:
                raise
    else:
        raise

def unapply_migration(self, name, statements, fake=False):
    import psycopg2
    connection = self.get_connection()
    with connection.cursor() as cursor:
        try:
            if not fake:
                cursor.execute(statements)
        except (psycopg2.OperationalError,
                psycopg2.ProgrammingError) as e:
            connection.rollback()
            print(e.message)
            from sqlibrist.helpers import ApplyMigrationFailed
            raise ApplyMigrationFailed
        else:
            cursor.execute('DELETE FROM sqlibrist.migrations '
                           'WHERE migration = (%s); ', [name])
            connection.commit()

def apply_migration(self, name, statements, fake=False):
    import MySQLdb
    connection = self.get_connection()
    cursor = connection.cursor()
    try:
        if not fake and statements.strip():
            cursor.execute(statements)
    except (MySQLdb.OperationalError, MySQLdb.ProgrammingError) as e:
        print('\n'.join(map(str, e.args)))
        from sqlibrist.helpers import ApplyMigrationFailed
        raise ApplyMigrationFailed
    else:
        cursor.execute('INSERT INTO sqlibrist_migrations '
                       '(migration) VALUES (%s);',
                       [name.split('/')[-1]])

def unapply_migration(self, name, statements, fake=False):
    import MySQLdb
    connection = self.get_connection()
    cursor = connection.cursor()
    try:
        if not fake:
            cursor.execute(statements)
    except (MySQLdb.OperationalError, MySQLdb.ProgrammingError) as e:
        print('\n'.join(map(str, e.args)))
        from sqlibrist.helpers import ApplyMigrationFailed
        raise ApplyMigrationFailed
    else:
        cursor.execute('DELETE FROM sqlibrist_migrations '
                       'WHERE migration = (%s); ', [name])

def _connect(self):
    """Function connects to the database"""
    logger.debug('Connecting to MySQL database.')
    try:
        if str(self.logsocket).lower() == 'tcp':
            self.connection = MySQLdb.connect(
                host=self.host,
                port=self.port,
                user=self.username,
                passwd=self.passphrase,
                db=self.db)
        elif str(self.logsocket).lower() == 'dev':
            self.connection = MySQLdb.connect(
                unix_socket=self.logdevice,
                user=self.username,
                passwd=self.passphrase,
                db=self.db)
        self._create_database()
    except (AttributeError, MySQLdb.OperationalError):
        logger.exception('Exception: Cannot connect to database.')

def insert(self, attack_event):
    """Function inserts attack-related data into the database

    Args:
        attack_event : dictionary containing attack-related information
    """
    cursor = self.connection.cursor()
    try:
        # ten columns require ten placeholders
        cursor.execute("""
            INSERT INTO events (ethernet_source, ip_source, port_source,
                                ethernet_destination, ip_destination,
                                port_destination, ethernet_type, protocol,
                                info, raw_packet)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""",
            (str(attack_event["ethernet_src"]),
             str(attack_event["ip_src"]),
             str(attack_event["port_src"]),
             str(attack_event["ethernet_dst"]),
             str(attack_event["ip_dst"]),
             str(attack_event["port_dst"]),
             str(attack_event["ethernet_type"]),
             str(attack_event["protocol"]),
             str(attack_event["info"]),
             str(attack_event["raw_pkt"])))
        self.connection.commit()
    except (AttributeError, MySQLdb.OperationalError):
        logger.error('Error: Cannot insert attack event into database.')

def sqlFunc(func):
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except OperationalError as e:
            if 'MySQL server has gone away' in str(e):
                sql_operator.connect()
                return func(*args, **kwargs)
            print e
    return wrapper

def __init__(self, global_configs=None):
    """Initialize the db connector.

    Args:
        global_configs (dict): Global configurations.

    Raises:
        MySQLError: An error with MySQL has occurred.
    """
    try:
        self.conn = MySQLdb.connect(
            host=global_configs['db_host'],
            user=global_configs['db_user'],
            db=global_configs['db_name'],
            local_infile=1)
    except OperationalError as e:
        LOGGER.error('Unable to create mysql connector:\n%s', e)
        raise MySQLError('DB Connector', e)

def _get_snapshot_table(self, resource_name, timestamp):
    """Returns a snapshot table name.

    Args:
        resource_name (str): String of the resource name.
        timestamp (str): String of timestamp, formatted as YYYYMMDDTHHMMSSZ.

    Returns:
        str: String of the created snapshot table.
    """
    try:
        snapshot_table_name = self.create_snapshot_table(
            resource_name, timestamp)
    except OperationalError:
        # TODO: find a better way to handle this. I want this method
        # to be resilient when the table has already been created
        # so that it can support inserting new data. This will catch
        # a sql 'table already exist' error and alter the flow.
        snapshot_table_name = self._create_snapshot_table_name(
            resource_name, timestamp)
    return snapshot_table_name

def select_record_count(self, resource_name, timestamp):
    """Select the record count from a snapshot table.

    Args:
        resource_name (str): String of the resource name, which is embedded
            in the table name.
        timestamp (str): String of timestamp, formatted as YYYYMMDDTHHMMSSZ.

    Returns:
        int: Integer of the record count in a snapshot table.

    Raises:
        MySQLError: When an error has occurred while executing the query.
    """
    try:
        record_count_sql = select_data.RECORD_COUNT.format(
            resource_name, timestamp)
        cursor = self.conn.cursor()
        cursor.execute(record_count_sql)
        return cursor.fetchone()[0]
    except (DataError, IntegrityError, InternalError, NotSupportedError,
            OperationalError, ProgrammingError) as e:
        raise MySQLError(resource_name, e)

def select_group_ids(self, resource_name, timestamp):
    """Select the group ids from a snapshot table.

    Args:
        resource_name (str): String of the resource name.
        timestamp (str): String of timestamp, formatted as YYYYMMDDTHHMMSSZ.

    Returns:
        list: A list of group ids.

    Raises:
        MySQLError: When an error has occurred while executing the query.
    """
    try:
        group_ids_sql = select_data.GROUP_IDS.format(timestamp)
        cursor = self.conn.cursor(cursorclass=cursors.DictCursor)
        cursor.execute(group_ids_sql)
        rows = cursor.fetchall()
        return [row['group_id'] for row in rows]
    except (DataError, IntegrityError, InternalError, NotSupportedError,
            OperationalError, ProgrammingError) as e:
        raise MySQLError(resource_name, e)

def execute_sql_with_commit(self, resource_name, sql, values):
    """Executes a provided sql statement with commit.

    Args:
        resource_name (str): String of the resource name.
        sql (str): String of the sql statement.
        values (tuple): Tuple of string for sql placeholder values.

    Raises:
        MySQLError: When an error has occurred while executing the query.
    """
    try:
        cursor = self.conn.cursor()
        cursor.execute(sql, values)
        self.conn.commit()
    except (DataError, IntegrityError, InternalError, NotSupportedError,
            OperationalError, ProgrammingError) as e:
        raise MySQLError(resource_name, e)

def mysql_requirement(_f):
    verbose = os.environ.get('eventlet_test_mysql_verbose')
    if MySQLdb is None:
        if verbose:
            print(">> Skipping mysql tests, MySQLdb not importable", file=sys.stderr)
        return False

    try:
        auth = tests.get_database_auth()['MySQLdb'].copy()
        MySQLdb.connect(**auth)
        return True
    except MySQLdb.OperationalError:
        if verbose:
            print(">> Skipping mysql tests, error when connecting:", file=sys.stderr)
            traceback.print_exc()
        return False

def run_ddl(self, db, sql):
    log.debug("Creating table using: \n {}".format(sql))
    try:
        self.use_db(db)
        self.execute_sql(sql)
    except (MySQLdb.OperationalError, MySQLdb.ProgrammingError) as e:
        errnum, errmsg = e.args
        log.error(
            "SQL execution error: [{}] {}\n"
            "When executing: {}\n"
            "With args: {}"
            .format(errnum, errmsg, self._sql_now, self._sql_args_now))
        raise OSCError('GENERIC_MYSQL_ERROR',
                       {'stage': "running DDL on db '{}'".format(db),
                        'errnum': errnum,
                        'errmsg': errmsg},
                       mysql_err_code=errnum)

def select_full_table_into_outfile(self):
    stage_start_time = time.time()
    try:
        outfile = '{}.1'.format(self.outfile)
        affected_rows = self.execute_sql(
            sql.select_full_table_into_file(
                self._pk_for_filter,
                self.table_name,
                self.is_skip_fcache_supported,
                self.where),
            (outfile, ))
        self.outfile_suffix_end = 1
        self.stats['outfile_lines'] = affected_rows
        self._cleanup_payload.add_file_entry(outfile)
        self.commit()
    except MySQLdb.OperationalError as e:
        errnum, errmsg = e.args
        # 1086: File exists
        if errnum == 1086:
            raise OSCError('FILE_ALREADY_EXIST', {'file': outfile})
        else:
            raise
    self.stats['time_in_dump'] = time.time() - stage_start_time

def change_explicit_commit(self, enable=True):
    """
    Turn on/off rocksdb_commit_in_the_middle to avoid commit stall for
    large data infiles
    """
    v = 1 if enable else 0
    try:
        self.execute_sql(
            sql.set_session_variable('rocksdb_commit_in_the_middle'), (v,))
    except MySQLdb.OperationalError as e:
        errnum, errmsg = e.args
        # 1193: unknown variable
        if errnum == 1193:
            log.warning(
                "Failed to set rocksdb_commit_in_the_middle: {}"
                .format(errmsg))
        else:
            raise

def change_rocksdb_bulk_load(self, enable=True):
    # rocksdb_bulk_load relies on data being dumped in the same sequence
    # as the new pk. If we are changing pk, then we cannot ensure that
    if self._old_table.primary_key != self._new_table.primary_key:
        log.warning("Skip rocksdb_bulk_load, because we are changing PK")
        return

    v = 1 if enable else 0
    try:
        self.execute_sql(
            sql.set_session_variable('rocksdb_bulk_load'), (v,))
    except MySQLdb.OperationalError as e:
        errnum, errmsg = e.args
        # 1193: unknown variable
        if errnum == 1193:
            log.warning(
                "Failed to set rocksdb_bulk_load: {}".format(errmsg))
        else:
            raise

def insert_db(ipdate):
    lock = threading.Lock()
    try:
        lock.acquire()
        conn = env.conn
        cur = conn.cursor()
        # append the current timestamp to the record before inserting
        ISOTIMEFORMAT = '%Y-%m-%d %X'
        ipdate.append(time.strftime(ISOTIMEFORMAT, time.localtime()))
        log.step_normal('ipdate:[%s]' % ipdate)
        cur.execute(
            "INSERT INTO ip_log(ip,address,keyword,url,click,error,page,rank,created_at) "
            "VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
            ipdate)
        cur.close()
        conn.commit()
        # conn.close()
        lock.release()
        log.step_normal('insert into ip_log succeeded')
    except (MySQLdb.OperationalError, Exception) as e:
        log.step_warning('insert into ip_log failed: [%s]' % e)

def get_master_mysql_major_version(instance):
    """ Given an instance, determine the mysql major version for the master
        of the replica set.

    Args:
    instance - a hostaddr object

    Returns - A string similar to '5.5' or '5.6'
    """
    zk = MysqlZookeeper()
    master = zk.get_mysql_instance_from_replica_set(
        instance.get_zk_replica_set()[0],
        repl_type=REPLICA_ROLE_MASTER)
    try:
        mysql_version = get_global_variables(master)['version'][:3]
    except _mysql_exceptions.OperationalError:
        raise Exception('Could not connect to master server {instance} in '
                        'order to determine MySQL version to launch with. '
                        'Perhaps run this script from there? This is likely '
                        'due to firewall rules.'
                        ''.format(instance=instance.hostname))
    return mysql_version

def markasdone(self, job, success, elapsedtime, error=None):
    """ log a job as being processed (either successfully or not) """
    query = """
        INSERT INTO modw_supremm.`process`
            (jobid, process_version, process_timestamp, process_time)
        VALUES (%s, %s, NOW(), %s)
        ON DUPLICATE KEY UPDATE
            process_version = %s, process_timestamp = NOW(), process_time = %s
        """

    if error is not None:
        version = -1000 - error
    else:
        version = Accounting.PROCESS_VERSION if success else -1 * Accounting.PROCESS_VERSION

    data = (job.job_pk_id, version, elapsedtime, version, elapsedtime)

    if self.madcon is None:
        self.madcon = getdbconnection(self.dbsettings, False)

    cur = self.madcon.cursor()
    try:
        cur.execute(query, data)
    except OperationalError:
        logging.warning("Lost MySQL Connection. Attempting single reconnect")
        self.madcon = getdbconnection(self.dbsettings, False)
        cur = self.madcon.cursor()
        cur.execute(query, data)

    self.madcon.commit()

def insert(self, resource_id, hostname, filename, start, end, jobid):
    """ Insert an archive record """
    try:
        self.insertImpl(resource_id, hostname, filename, start, end, jobid)
    except OperationalError:
        logging.error("Lost MySQL Connection. Attempting single reconnect")
        self.con = getdbconnection(self.dbconfig)
        self.insertImpl(resource_id, hostname, filename, start, end, jobid)

def _test_db_conn(self):
    import MySQLdb
    try:
        MySQLdb.connect(host=self.db_host,
                        port=int(self.db_port),
                        user=self.db_user,
                        passwd=self.db_pass,
                        db=self.db)
        color_print('Database connection test succeeded.', 'green')
        return True
    except MySQLdb.OperationalError, e:
        color_print('Database connection test failed: %s' % e, 'red')
        return False

def test_query_retry(self):
    pool = self.pool

    sql = ('set session wait_timeout=1;')
    pool.query(sql)
    pool.query('show variables like "%timeout%";')

    with pool() as conn:
        time.sleep(2)
        with self.assertRaises(MySQLdb.OperationalError):
            print conn.query('show databases')

    # no error raised from above, thus a timed-out conn has been left in
    # the pool
    stat = pool('stat')
    dd('stat after timeout', stat)
    self.assertEqual(1, stat['create'], 'created 1 conn')

    # use previous conn, timed out and retry.
    pool.query('show databases', retry=1)

    stat = pool('stat')
    dd('stat after retry', stat)
    self.assertEqual(2, stat['create'], 'created another conn for retry')

def execute(self, query, args=None):
    try:
        # args is None means no string interpolation
        return self.cursor.execute(query, args)
    except Database.OperationalError as e:
        # Map some error codes to IntegrityError, since they seem to be
        # misclassified and Django would prefer the more logical place.
        if e.args[0] in self.codes_for_integrityerror:
            six.reraise(utils.IntegrityError,
                        utils.IntegrityError(*tuple(e.args)),
                        sys.exc_info()[2])
        raise

def executemany(self, query, args):
    try:
        return self.cursor.executemany(query, args)
    except Database.OperationalError as e:
        # Map some error codes to IntegrityError, since they seem to be
        # misclassified and Django would prefer the more logical place.
        if e.args[0] in self.codes_for_integrityerror:
            six.reraise(utils.IntegrityError,
                        utils.IntegrityError(*tuple(e.args)),
                        sys.exc_info()[2])
        raise

def execute(self, sql):
    '''exec sql insert'''
    ret = 0
    try:
        cursor = self.conn.cursor()
        ret = cursor.execute(sql)
    except (AttributeError, MySQLdb.OperationalError, MySQLdb.DatabaseError):
        logger.error(traceback.format_exc())
        logger.error("sql:{0}".format(sql))
        self.connect()
        if self.conn:
            cursor = self.conn.cursor()
            ret = cursor.execute(sql)
    return ret

def execute(self, sql):
    try:
        cursor = self.db.cursor()
        cursor.execute(sql)
        self.db.commit()
    except (AttributeError, MySQLdb.OperationalError):
        self.connect()
        cursor = self.db.cursor()
        cursor.execute(sql)
        self.db.commit()
    return cursor

def exec_sql(self, sql):
    conn = self._get_conn()
    try:
        with conn as cur:
            cur.execute(sql)
            return cur.fetchall()
    except MySQLdb.ProgrammingError as e:
        LOG.error("execute sql ({0}) error {1}".format(sql, e))
        raise e
    except MySQLdb.OperationalError as e:
        # create a new connection if the current one has been interrupted
        conn = self._create_new_conn()
        raise e
    finally:
        self._put_conn(conn)

def query(self, sql):
    try:
        cursor = self.conn.cursor()
        cursor.execute(sql)
    except (AttributeError, mdb.OperationalError):
        self.connect()
        cursor = self.conn.cursor()
        cursor.execute(sql)
    return cursor

def grant_exists(self, db_name, db_user, remote_ip):
    cursor = self.connection.cursor()
    priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
                  "TO '{}'@'{}'".format(db_name, db_user, remote_ip)
    try:
        cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user, remote_ip))
        grants = [i[0] for i in cursor.fetchall()]
    except MySQLdb.OperationalError:
        return False
    finally:
        cursor.close()

    # TODO: review for different grants
    return priv_string in grants

def run_conn_fun(fun, *args):
    try:
        global conn
        result = yield getattr(conn, fun)(*args)
    except (MySQLdb.OperationalError, adbapi.ConnectionLost):
        try:
            conn.close()
        except:
            pass
        conn = adbapi.ConnectionPool("MySQLdb",
                                     host=MYSQLCONFIG["host"],
                                     user=MYSQLCONFIG["user"],
                                     passwd=MYSQLCONFIG["passwd"],
                                     charset=MYSQLCONFIG["charset"],
                                     port=MYSQLCONFIG["port"],
                                     db=MYSQLCONFIG["db"],
                                     cp_reconnect=True)
        result = yield getattr(conn, fun)(*args)
    defer.returnValue(result)

def load_data(self, resource_name, timestamp, data):
    """Load data into a snapshot table.

    Args:
        resource_name (str): String of the resource name.
        timestamp (str): String of timestamp, formatted as YYYYMMDDTHHMMSSZ.
        data (iterable): An iterable or a list of data to be uploaded.

    Raises:
        MySQLError: When an error has occurred while executing the query.
    """
    with csv_writer.write_csv(resource_name, data) as csv_file:
        try:
            snapshot_table_name = self._create_snapshot_table_name(
                resource_name, timestamp)
            load_data_sql = load_data_sql_provider.provide_load_data_sql(
                resource_name, csv_file.name, snapshot_table_name)
            LOGGER.debug('SQL: %s', load_data_sql)
            cursor = self.conn.cursor()
            cursor.execute(load_data_sql)
            self.conn.commit()
            # TODO: Return the snapshot table name so that it can be tracked
            # in the main snapshot table.
        except (DataError, IntegrityError, InternalError, NotSupportedError,
                OperationalError, ProgrammingError) as e:
            raise MySQLError(resource_name, e)

def get_latest_snapshot_timestamp(self, statuses):
    """Select the latest timestamp of the completed snapshot.

    Args:
        statuses (tuple): The tuple of snapshot statuses to filter on.

    Returns:
        str: The string timestamp of the latest complete snapshot.

    Raises:
        MySQLError: When no rows are found.
    """
    # Build a dynamic parameterized query string for filtering the
    # snapshot statuses
    if not isinstance(statuses, tuple):
        statuses = ('SUCCESS',)
    status_params = ','.join(['%s'] * len(statuses))
    filter_clause = SNAPSHOT_STATUS_FILTER_CLAUSE.format(status_params)
    try:
        cursor = self.conn.cursor()
        cursor.execute(
            select_data.LATEST_SNAPSHOT_TIMESTAMP + filter_clause,
            statuses)
        row = cursor.fetchone()
        if row:
            return row[0]
        raise NoResultsError('No snapshot cycle found.')
    except (DataError, IntegrityError, InternalError, NotSupportedError,
            OperationalError, ProgrammingError, NoResultsError) as e:
        raise MySQLError('snapshot_cycles', e)

def execute(self, *args, **kwargs):
    try:
        result = self._cursor.execute(*args, **kwargs)
    except MySQLdb.ProgrammingError as e:
        raise MySQLdb.ProgrammingError(
            e.args[0],
            e.args[1] + '.\nSTATEMENT: {}'.format(self._cursor._last_executed))
    except MySQLdb.OperationalError as e:
        # Sometimes a MySQL session times out. In this case, we wish to
        # reconnect, and reissue the query.
        if e.args[0] == 2006:
            self.connect()
            result = self._cursor.execute(*args, **kwargs)
        else:
            raise MySQLdb.OperationalError(
                e.args[0],
                e.args[1] + '.\nSTATEMENT: {}'.format(self._cursor._last_executed))
    return result