The following code examples, extracted from open-source Python projects, illustrate how logging.warn() is used in practice.
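One caveat before the examples: logging.warn() is a long-deprecated, undocumented alias for logging.warning(); Python 3 emits a DeprecationWarning when the alias is called, and the newest releases drop it entirely, so new code should prefer logging.warning(). The minimal sketch below is not taken from any of the quoted projects; the basicConfig() format string is illustrative only and simply shows the call pattern the examples rely on.

    import logging

    # Illustrative setup only: each project below configures logging in its own way.
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.DEBUG)

    # logging.warn() behaves like logging.warning(): both emit a WARNING-level
    # record and accept printf-style arguments (the alias is deprecated).
    logging.warn('low disk space: %d MB free', 42)      # deprecated alias
    logging.warning('low disk space: %d MB free', 42)   # preferred spelling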
def sync(src, dest, module, opts=None):
    # Sync charmhelpers/__init__.py for bootstrap code.
    sync_pyfile(_src_path(src, '__init__'), dest)

    # Sync other __init__.py files in the path leading to module.
    m = []
    steps = module.split('.')[:-1]
    while steps:
        m.append(steps.pop(0))
        init = '.'.join(m + ['__init__'])
        sync_pyfile(_src_path(src, init),
                    os.path.dirname(_dest_path(dest, init)))

    # Sync the module, or maybe a .py file.
    if os.path.isdir(_src_path(src, module)):
        sync_directory(_src_path(src, module), _dest_path(dest, module), opts)
    elif _is_pyfile(_src_path(src, module)):
        sync_pyfile(_src_path(src, module),
                    os.path.dirname(_dest_path(dest, module)))
    else:
        logging.warn('Could not sync: %s. Neither a pyfile or directory, '
                     'does it even exist?' % module)
def _get_result(self, document, *args, **kwargs):
    if not self.retries:
        return self.transport.execute(document, *args, **kwargs)

    last_exception = None
    retries_count = 0
    while retries_count < self.retries:
        try:
            result = self.transport.execute(document, *args, **kwargs)
            return result
        except Exception as e:
            last_exception = e
            logging.warn("Request failed with exception %s. Retrying for the %s time...",
                         e, retries_count + 1, exc_info=True)
        finally:
            retries_count += 1

    raise RetryError(retries_count, last_exception)
def thread_entry(self):
    """Entry point for consumption happening in the thread"""
    try:
        # Keep buffering for some time
        self._buffer_start = time.time()
        while self._q.qsize() < self._video_buffer_sz and not self._stop_thread:
            logging.info('Buffering. Q: %s', self._q.qsize())
            time.sleep(0.25)

        # Set the start time
        self._start_time = time.time()

        # Run until requested to stop
        while not self._stop_thread:
            self.__consume()
            time.sleep(1 / self._fps)

        # When stopped - indicate stop time
        self._stop_time = time.time()
        return
    except KeyboardInterrupt:
        logging.warn('Except in consume thread!')
        pass
def _cancel_order(self, order_id):
    response = self.market.cancel(order_id)
    if not response:
        return response
    if response and "code" in response:
        logging.warn(response)
        return False

    resp_order_id = response['order_id']
    if resp_order_id == -1:
        logging.warn("cancel order #%s failed, %s" % (order_id, resp_order_id))
        return False
    else:
        logging.debug("Canceled order #%s ok" % (order_id))
        return True
    return True
def get_info(self):
    """Get balance"""
    try:
        response = self.market.accountInfo()
        if response and "code" in response:
            logging.warn(response)
            return False
            raise TradeException(response["message"])
        if response:
            self.btc_balance = float(response["available_btc_display"])
            self.cny_balance = float(response["available_cny_display"])
            self.btc_frozen = float(response["frozen_btc_display"])
            self.cny_frozen = float(response["frozen_cny_display"])
    except Exception as ex:
        logging.warn("get_info failed :%s" % ex)
        traceback.print_exc()
        return False
def notify_msg(self, type, price):
    import zmq
    try:
        context = zmq.Context()
        socket = context.socket(zmq.PUSH)
        socket.connect("tcp://%s:%s" % (config.ZMQ_HOST, config.ZMQ_PORT))
        time.sleep(1)
        message = {'type': type, 'price': price}
        logging.info("notify message %s", json.dumps(message))
        socket.send_string(json.dumps(message))
    except Exception as e:
        logging.warn("notify_msg Exception")
        pass
def handle(self):
    client_ip = self.client_address[0]
    addr = ''
    server = ''
    try:
        sock = self.connection
        sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        odestdata = sock.getsockopt(socket.SOL_IP, 80, 16)
        port, addr_ip = struct.unpack("!xxH4sxxxxxxxx", odestdata)
        addr = socket.inet_ntoa(addr_ip)
        server = reverse(addr)
        print_log('%s connecting %s:%d %d %s' % (client_ip, addr, port, server[0], str(server[1])))
        Proxy[server[0]].proxy(sock, server[1], (addr, port))
    except socket.error as e:
        logging.warn(addr + ':' + str(server) + ':' + str(e))
        sock.close()
def pull_db_all_user(self):
    import cymysql
    # Open the MySQL connection, over SSL when enabled in the config.
    if self.cfg["ssl_enable"] == 1:
        conn = cymysql.connect(host=self.cfg["host"], port=self.cfg["port"],
                               user=self.cfg["user"], passwd=self.cfg["password"],
                               db=self.cfg["db"], charset='utf8',
                               ssl={'ca': self.cfg["ssl_ca"],
                                    'cert': self.cfg["ssl_cert"],
                                    'key': self.cfg["ssl_key"]})
    else:
        conn = cymysql.connect(host=self.cfg["host"], port=self.cfg["port"],
                               user=self.cfg["user"], passwd=self.cfg["password"],
                               db=self.cfg["db"], charset='utf8')
    try:
        rows = self.pull_db_users(conn)
    finally:
        conn.close()
    if not rows:
        logging.warn('no user in db')
    return rows
def pull_db_all_user(self):
    import json
    rows = None
    config_path = get_config().MUDB_FILE
    with open(config_path, 'rb+') as f:
        rows = json.loads(f.read().decode('utf8'))
        for row in rows:
            try:
                if 'forbidden_ip' in row:
                    row['forbidden_ip'] = common.IPNetwork(row['forbidden_ip'])
            except Exception as e:
                logging.error(e)
            try:
                if 'forbidden_port' in row:
                    row['forbidden_port'] = common.PortRange(row['forbidden_port'])
            except Exception as e:
                logging.error(e)
    if not rows:
        logging.warn('no user in json file')
    return rows
def update_mu_users(self, port, users):
    port = int(port)
    if port in self.tcp_servers_pool:
        try:
            self.tcp_servers_pool[port].update_users(users)
        except Exception as e:
            logging.warn(e)
        try:
            self.udp_servers_pool[port].update_users(users)
        except Exception as e:
            logging.warn(e)
    if port in self.tcp_ipv6_servers_pool:
        try:
            self.tcp_ipv6_servers_pool[port].update_users(users)
        except Exception as e:
            logging.warn(e)
        try:
            self.udp_ipv6_servers_pool[port].update_users(users)
        except Exception as e:
            logging.warn(e)
def _socket_bind_addr(self, sock, af):
    bind_addr = ''
    if self._bind and af == socket.AF_INET:
        bind_addr = self._bind
    elif self._bindv6 and af == socket.AF_INET6:
        bind_addr = self._bindv6
    else:
        bind_addr = self._accept_address[0]

    bind_addr = bind_addr.replace("::ffff:", "")
    if bind_addr in self._ignore_bind_list:
        bind_addr = None
    if bind_addr:
        local_addrs = socket.getaddrinfo(bind_addr, 0, 0, socket.SOCK_STREAM, socket.SOL_TCP)
        if local_addrs[0][0] == af:
            logging.debug("bind %s" % (bind_addr,))
            try:
                sock.bind((bind_addr, 0))
            except Exception as e:
                logging.warn("bind %s fail" % (bind_addr,))
def handle_event(self, sock, fd, event):
    if sock != self._sock:
        return
    if event & eventloop.POLL_ERR:
        logging.error('dns socket err')
        self._loop.remove(self._sock)
        self._sock.close()
        # TODO when dns server is IPv6
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.SOL_UDP)
        self._sock.setblocking(False)
        self._loop.add(self._sock, eventloop.POLL_IN, self)
    else:
        data, addr = sock.recvfrom(1024)
        if addr not in self._servers:
            logging.warn('received a packet other than our dns')
            return
        self._handle_data(data)
def _socket_bind_addr(self, sock, af):
    bind_addr = ''
    if self._bind and af == socket.AF_INET:
        bind_addr = self._bind
    elif self._bindv6 and af == socket.AF_INET6:
        bind_addr = self._bindv6

    bind_addr = bind_addr.replace("::ffff:", "")
    if bind_addr in self._ignore_bind_list:
        bind_addr = None
    if bind_addr:
        local_addrs = socket.getaddrinfo(bind_addr, 0, 0, socket.SOCK_DGRAM, socket.SOL_UDP)
        if local_addrs[0][0] == af:
            logging.debug("bind %s" % (bind_addr,))
            try:
                sock.bind((bind_addr, 0))
            except Exception as e:
                logging.warn("bind %s fail" % (bind_addr,))
def insert(self, connection_id):
    if not self.enable:
        logging.warn('obfs auth: not enable')
        return False
    if not self.is_active():
        self.re_enable(connection_id)
    self.update()
    if connection_id < self.front:
        logging.warn('obfs auth: deprecated id, someone replay attack')
        return False
    if connection_id > self.front + 0x4000:
        logging.warn('obfs auth: wrong id')
        return False
    if connection_id in self.alloc:
        logging.warn('obfs auth: duplicate id, someone replay attack')
        return False
    if self.back <= connection_id:
        self.back = connection_id + 1
    self.alloc[connection_id] = 1
    while (self.front in self.alloc) or self.front + 0x1000 < self.back:
        if self.front in self.alloc:
            del self.alloc[self.front]
        self.front += 1
    self.addref()
    return True
def insert(self, connection_id):
    if not self.enable:
        logging.warn('obfs auth: not enable')
        return False
    if not self.is_active():
        self.re_enable(connection_id)
    self.update()
    if connection_id < self.front:
        logging.warn('obfs auth: deprecated id, someone replay attack')
        return False
    if connection_id > self.front + 0x4000:
        logging.warn('obfs auth: wrong id')
        return False
    if connection_id in self.alloc:
        logging.warn('obfs auth: duplicate id, someone replay attack')
        return False
    if self.back <= connection_id:
        self.back = connection_id + 1
    self.alloc[connection_id] = 1
    while (self.front in self.alloc) or self.front + 0x1000 < self.back:
        if self.front in self.alloc:
            del self.alloc[self.front]
        self.front += 1
    return True
def insert(self, client_id, connection_id):
    if self.client_id.get(client_id, None) is None or not self.client_id[client_id].enable:
        if self.client_id.first() is None or len(self.client_id) < self.max_client:
            if client_id not in self.client_id:
                # TODO: check
                self.client_id[client_id] = client_queue(connection_id)
            else:
                self.client_id[client_id].re_enable(connection_id)
            return self.client_id[client_id].insert(connection_id)

        if not self.client_id[self.client_id.first()].is_active():
            del self.client_id[self.client_id.first()]
            if client_id not in self.client_id:
                # TODO: check
                self.client_id[client_id] = client_queue(connection_id)
            else:
                self.client_id[client_id].re_enable(connection_id)
            return self.client_id[client_id].insert(connection_id)

        logging.warn('auth_sha1_v2: no inactive client')
        return False
    else:
        return self.client_id[client_id].insert(connection_id)
def depaginate(self, response, data_key=None):
    logging.debug('Attempting to depaginate response from {}'.format(response.url))
    all_data = []
    this_data = self.extract_data_from_response(response, data_key=data_key)
    if this_data is not None:
        if type(this_data) == list:
            all_data += this_data
        else:
            all_data.append(this_data)
    if self.has_pagination_links(response):
        pagination_links = self.extract_pagination_links(response)
        while 'next' in pagination_links:
            response = self.session.get(pagination_links['next'])
            pagination_links = self.extract_pagination_links(response)
            this_data = self.extract_data_from_response(response, data_key=data_key)
            if this_data is not None:
                if type(this_data) == list:
                    all_data += this_data
                else:
                    all_data.append(this_data)
    else:
        logging.warn('Response from {} has no pagination links.'.format(response.url))
    return all_data
def approve(user):
    """Approve a registered user so that they can use API endpoints.

    :param user: an instance of the `KlaxerUser` object to verify
    :returns: an instance of the user that matched after updating it
    :rtype: `klaxer.users.KlaxerUser`
    """
    if user.approved:
        logging.warn('noop - User %d already approved', user.id)
        return user
    user.approved = True
    for message in user.messages:
        if message.text == config.MSG_WELCOME:
            session.delete(message)
    session.add(user)
    session.commit()
    return user
def update_charts(q, event, size):
    try:
        os.nice(10)
    except AttributeError:
        logging.warn("can't be nice to windows")
    q.put((CRAWL_MESSAGE, 4, 'Chart engine starting...'))
    base_map = graphics.create_map()
    last_qso_timestamp = 0
    q.put((CRAWL_MESSAGE, 4, ''))
    try:
        while not event.is_set():
            t0 = time.time()
            last_qso_timestamp = load_data(size, q, base_map, last_qso_timestamp)
            t1 = time.time()
            delta = t1 - t0
            update_delay = config.DATA_DWELL_TIME - delta
            if update_delay < 0:
                update_delay = config.DATA_DWELL_TIME
            logging.debug('Next data update in %f seconds', update_delay)
            event.wait(update_delay)
    except Exception as e:
        logging.exception('Exception in update_charts', exc_info=e)
        q.put((CRAWL_MESSAGE, 4, 'Chart engine failed.', graphics.YELLOW, graphics.RED))
def post(self, id):
    job = models.RemindJob.get_by_id(int(id))
    user = auth.get_current_user()
    if job is None:
        raise HTTPNotFound()
    if job.user_id != user.user_id():
        raise HTTPForbidden()
    if job.state != 'scheduled':
        logging.warn('check_reply for non-scheduled job not possible')
        raise HTTPBadRequest()
    reply = validation.disabled_reply_schema(self.json)
    job.disabled_reply = models.DisabledReply(
        message_id=reply['messageId'],
        from_name=reply['fromName'],
        from_email=reply['fromEmail'])
    job.add_to_check_reply_queue()
    self.response_json(None)
def disable_if_replied(self, auth_token):
    """Checks if there was a reply and disables job if there was."""
    logging.info('processing disable_if_replied for {}'.format(self.key))
    if self.state != 'checking':
        logging.warn('job not in checking state, skipping')
        return
    if not self.only_if_noreply:
        logging.warn('only_if_noreply not configured, skipping')
        self.state = 'scheduled'
        self.put()
        return
    mailman = gmail.Mailman(self.user_email, auth_token)
    try:
        reply = self.find_reply(mailman)
    finally:
        mailman.quit()
    if reply is not None:
        logging.info('reply found, disabling job')
        self.state = 'disabled'
        self.disabled_reply = DisabledReply.from_gmail_dict(reply)
        self.put()
    else:
        self.state = 'scheduled'
        self.put()
def roster_cloud(self):
    '''Query roster JSON from cloud'''
    success, code, message, text = self.verbose_cloud_request({
        'operation': 'sharedroster',
        'username': self.username,
        'domain': self.authDomain
    })
    if success:
        if code is not None and code != requests.codes.ok:
            return code, None
        else:
            sr = None
            try:
                sr = message['data']['sharedRoster']
                return sr, text
            except Exception as e:
                logging.warn('Weird response: ' + str(e))
                return message, text
    else:
        return False, None
def read_request(cls):
    field_no = 0
    fields = [None, None, None, None]
    length_field = sys.stdin.read(2)
    while len(length_field) == 2:
        (size,) = unpack('>H', length_field)
        val = sys.stdin.read(size)
        if len(val) != size:
            logging.warn('premature EOF while reading field %d: %d != %d' % (field_no, len(val), size))
            return
        fields[field_no] = val
        field_no = (field_no + 1) % 4
        if field_no == 0:
            logging.debug('from_saslauthd got %s, %s, %s, %s' % tuple(fields))
            yield ('auth', fields[0], fields[3], fields[1])
        length_field = sys.stdin.read(2)
def roster_background_thread(self, sr):
    '''Entry for background roster update thread'''
    try:
        logging.debug('roster_thread for ' + str(sr))
        # Allow test hooks with static ejabberd_controller
        if hasattr(self.ctx, 'ejabberd_controller') and self.ctx.ejabberd_controller is not None:
            e = self.ctx.ejabberd_controller
        else:
            e = ejabberdctl(self.ctx)
        groups, commands = self.roster_update_users(e, sr)
        self.roster_update_groups(e, groups)
        # For some reason, the vcard changes are not pushed to the clients. Rinse and repeat.
        # Maybe not necessary with synchronous thread?
        # for cmd in commands:
        #     e.execute(cmd)
        self.ctx.shared_roster_db.sync()
    except AttributeError:
        pass  # For tests
    except Exception as err:
        (etype, value, tb) = sys.exc_info()
        traceback.print_exception(etype, value, tb)
        logging.warn('roster_groups thread: %s:\n%s' % (str(err), ''.join(traceback.format_tb(tb))))
        return False
def load_weights(fname, params):
    # params = lasagne.layers.get_all_params(l_out,trainable=True)+[log_sigma]+[x for x in lasagne.layers.get_all_params(l_out) if x.name[-4:]=='mean' or x.name[-7:]=='inv_std']
    names = [par.name for par in params]
    if len(names) != len(set(names)):
        raise ValueError('need unique param names')
    param_dict = np.load(fname)
    for param in params:
        if param.name in param_dict:
            stored_shape = np.asarray(param_dict[param.name].shape)
            param_shape = np.asarray(param.get_value().shape)
            if not np.all(stored_shape == param_shape):
                warn_msg = 'shape mismatch:'
                warn_msg += '{} stored:{} new:{}'.format(param.name, stored_shape, param_shape)
                warn_msg += ', skipping'
                warnings.warn(warn_msg)
            else:
                param.set_value(param_dict[param.name])
        else:
            logging.warn('unable to load parameter {} from {}'.format(param.name, fname))
    if 'metadata' in param_dict:
        metadata = pickle.loads(str(param_dict['metadata']))
    else:
        metadata = {}
    return metadata
def main(args):
    num, den = args
    try:
        num, den = float(num), float(den)
    except ValueError as e:
        logging.error('Invalid input')
        return INVALID_INPUT
    if den == 0:
        # this is a run-time error but not a type error
        # can be considered a warning or an error based on use case
        # written here as mere warning.
        logging.warn('Invalid denominator input!')
        return DIV_BY_ZERO_EXIT
    if math.isnan(num) or math.isnan(den):
        return INVALID_INPUT_NAN
    if math.isinf(num) or math.isinf(den):
        return INVALID_INPUT_INF
    print('Answer: ' + str(num / den))
    return 0
def testing(label, cache, loop):
    def w(g):
        return asyncio.wait_for(g, args.timeout, loop=loop)

    key = 'foo-%s' % label
    while True:
        logging.info('%s %s', label, '-' * 20)
        try:
            ret = yield from w(cache.set(key, 'hello-%s-world' % label))
            logging.info('%s set %s', label, ret)
            ret = yield from w(cache.get(key))
            logging.info('%s get %s', label, ret)
            ret = yield from w(cache.delete(key))
            logging.info('%s del %s', label, ret)
            ret = yield from w(cache.get(key))
            logging.info('%s get2 %s', label, ret)
        except asyncio.TimeoutError:
            logging.warn('%s Timeout', label)
        except Exception as exc:
            logging.exception('%s Client exception: %r', label, exc)
            break
def _logWriter(self, level, message, exception=None):
    self._logger.setLevel(level)
    self._fh.setLevel(level)
    self._ch.setLevel(level)

    if(exception!=None):
        exFormatted = self._formatException(exception)
        msg = "%s%s" % (message, exFormatted)
    else:
        # fall back to the bare message when no exception is given
        msg = message

    if(level==logging.DEBUG):
        logging.debug(msg)
    elif(level==logging.INFO):
        logging.info(msg)
    elif(level==logging.WARN):
        logging.warn(msg)
    elif(level==logging.FATAL):
        logging.fatal(msg)
    if(level==logging.ERROR):
        logging.error(msg)
def read(self, **kwargs):
    """Read this timeseries from file using GWpy. If the file is not
    present, an IOError is raised, UNLESS an unsuccessful attempt has been
    made to download the file, in which case it raises an NDS2Exception (a
    custom error type)."""
    try:
        return gwpy.timeseries.TimeSeries.read(self.fname, **kwargs)
    except IOError as e:
        if not self.query_failed():
            msg = ('tried concatenating data, but a download attempt seems '
                   'not to have been made for this query: {} See IOError '
                   'message: {}').format(self, e)
            logging.error(msg)
            raise IOError(('Aborting concatenation. Neither an error log '
                           'file nor a saved timeseries file were found '
                           'for this query: {}').format(self))
        else:
            logging.warn(('While reading, encountered failed query: '
                          '{}. Padding...').format(self))
            raise NDS2Exception(('This query seems to have failed '
                                 'downloading: {}').format(self))
def _get_missing_m_trend(self, pad='DEFAULT_PAD', **kwargs):
    """Get a single second of missing data."""
    logging.debug('Fetching missing m-trend: {}'.format(self))
    missing_buf = self.fetch()  # explicitly fetch from NDS2
    trend = self.channel.split('.')[1].split(',')[0]
    # make m-trend value for this minute based on trend extension
    if len(np.nonzero(missing_buf == -1)[0]) != 0:
        # this won't actually check for anything at the moment because
        # gwpy.timeseries.TimeSeries.fetch() does not have a padding option
        # yet
        logging.warn('Still missing data in {}'.format(self))
    elif trend == 'mean':
        buf_trend = missing_buf.mean()
    elif trend == 'min':
        buf_trend = missing_buf.min()
    elif trend == 'max':
        buf_trend = missing_buf.max()
    elif trend == 'rms':
        buf_trend = missing_buf.rms(60)[0]
    elif trend == 'n':
        buf_trend = missing_buf.sum()
    else:
        raise ValueError('Unrecognized trend type: {}'.format(trend))
    return buf_trend
def handle_event(self, sock, fd, event):
    if sock != self._sock:
        return
    if event & eventloop.POLL_ERR:
        logging.error('dns socket err')
        self._loop.remove(self._sock)
        self._sock.close()
        # TODO when dns server is IPv6
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.SOL_UDP)
        self._sock.setblocking(False)
        self._loop.add(self._sock, eventloop.POLL_IN, self)
    else:
        data, addr = sock.recvfrom(1024)
        if addr[0] not in self._servers:
            logging.warn('received a packet other than our dns')
            return
        self._handle_data(data)
def get(self, *args, **kwargs):
    try:
        r = self.client.get(*args, **kwargs)
        # if rate limit reach
        if r.status_code == 429:
            seconds = self.wait_time()
            logging.warn("Rate limit 429 from Weibo API, Sleep %d to try...", seconds)
            time.sleep(seconds)
            r = self.get(*args, **kwargs)
        return r
    except APIError as e:
        # if rate limit reach
        log.error("caught APIError error %s", e)
        if e.error_code in [10022, 10023, 10024]:
            seconds = self.wait_time()
            logging.warn("Rate limit %d from Weibo API, Sleep %d to try...", e.error_code, seconds)
            time.sleep(seconds)
            return self.get(*args, **kwargs)
        else:
            raise e
    except requests.exceptions.ConnectionError as e:
        log.error("caught connection error %s", e)
        self._connect()
        return self.get(*args, **kwargs)
def setup_logger():
    """Return a logging obj with a default ColoredFormatter."""
    try:
        # Import inside the try block so the ImportError fallback below can fire.
        from colorlog import ColoredFormatter
        from gettext import gettext as _  # noqa

        formatter = ColoredFormatter(
            "%(asctime)s %(name)-12s (%(threadName)-9s) %(log_color)s%(levelname)-8s%(reset)s (%(funcName)-5s) %(message_log_color)s%(message)s",  # noqa
            datefmt=None,
            reset=True,
            log_colors={
                'DEBUG': 'cyan',
                'INFO': 'green',
                'WARNING': 'yellow',
                'ERROR': 'red',
                'CRITICAL': 'bold_red',
                'TRACE': 'purple'
            },
            secondary_log_colors={
                'message': {
                    'ERROR': 'red',
                    'CRITICAL': 'red',
                    'DEBUG': 'yellow',
                    'INFO': 'yellow,bg_blue'
                }
            },
            style='%'
        )
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logging.getLogger('').addHandler(handler)
        logging.root.setLevel(logging.DEBUG)
    except ImportError:
        # No color available, use default config
        logging.basicConfig(format='%(levelname)s: %(message)s')
        logging.warn("Disabling color, you really want to install colorlog.")
def get_data_or_download(dir_name, file_name, url='', size='unknown'):
    """Returns the data. if the data hasn't been downloaded, then first download the data.

    :param dir_name: directory to look in
    :param file_name: file name to retrieve
    :param url: if the file is not found, then download it from this url
    :param size: the expected size
    :return: path to the requested file
    """
    dname = os.path.join(stanza.DATA_DIR, dir_name)
    fname = os.path.join(dname, file_name)
    if not os.path.isdir(dname):
        assert url, 'Could not locate data {}, and url was not specified. Cannot retrieve data.'.format(dname)
        os.makedirs(dname)
    if not os.path.isfile(fname):
        assert url, 'Could not locate data {}, and url was not specified. Cannot retrieve data.'.format(fname)
        logging.warn('downloading from {}. This file could potentially be *very* large! Actual size ({})'.format(url, size))
        with open(fname, 'wb') as f:
            f.write(get_from_url(url))
    return fname
def process(self, element):
    created_at = element.properties.get('created_at', None)
    cav = None
    if created_at:
        cav = created_at.timestamp_value
        cseconds = cav.seconds
    else:
        return
    crdt = datetime.datetime.fromtimestamp(cseconds)
    logging.warn("crdt: %s", crdt)
    logging.warn("earlier: %s", self.earlier)
    if crdt > self.earlier:
        # return only the elements (datastore entities) with a 'created_at' date
        # within the last self.days days.
        yield element
def fetch_json(self, url):
    """Fetch remote json"""
    timeout = 1
    while True:
        try:
            logging.debug('Opening %s.', url)
            response = urllib2.urlopen(url)
            break
        except urllib2.HTTPError as err:
            if timeout <= MAX_TIMEOUT:
                logging.warn('Error opening %s, error code %d, reason is %s.', url, err.code, err.reason)
                logging.warn('Waiting for %ds before retrying.', timeout)
                time.sleep(timeout)
                timeout *= 2
            else:
                logging.error('Error opening %s, error code %d, reason is %s.', url, err.code, err.reason)
                raise err
    data = json.load(response)
    return data
def record(device, file):
    logging.debug("Recording command to file " + file)
    # receive packet
    device.enter_learning()
    ir_packet = None
    attempt = 0
    while ir_packet is None and attempt < 6:
        time.sleep(5)
        ir_packet = device.check_data()
        attempt = attempt + 1
    if ir_packet is not None:
        # write to file
        directory = os.path.dirname(file)
        if not os.path.exists(directory):
            os.makedirs(directory)
        with open(file, 'wb') as f:
            f.write(str(ir_packet).encode('hex'))
        logging.debug("Done")
    else:
        logging.warn("No command received")
def get_player():
    """:returns a radio instance"""
    radio = None
    try:
        radio = player.VlcPlayer()
    except Exception as e:
        logging.warn('Failed to load first player option, trying another, %s' % str(e))
        radio = player.MpPlayer()
    finally:
        return radio
def get_stations():
    """:returns list of all the (available) radio stations

    fetching from a server helps to manage station list in one place
    """
    stations_json = []
    try:
        print("Fetching stations data from server.")
        response = requests.get(STATION_FETCH_URL)
        stations_json = response.json()
    except Exception as e:
        logging.warn('Unable to load data from server. Processing local data.')
        stations_json = get_stations_from_json()
    finally:
        return _format_station_json_to_dict(stations_json)
def merge_all_files_into_pytables(file_dir, file_out):
    """ process each file into pytables """
    start = None
    start = datetime.datetime.now()
    out_h5 = tables.openFile(file_out,
                             mode="w",
                             title="bars",
                             filters=tables.Filters(complevel=9, complib='zlib'))
    table = None
    for file_in in glob.glob(file_dir + "/*.gz"):
        gzip_file = gzip.open(file_in)
        expected_header = ["dt", "sid", "open", "high", "low", "close", "volume"]
        csv_reader = csv.DictReader(gzip_file)
        header = csv_reader.fieldnames
        if header != expected_header:
            logging.warn("expected header %s\n" % (expected_header))
            logging.warn("header_found %s" % (header))
            return
        for current_date, rows in parse_csv(csv_reader):
            table = out_h5.createTable("/TD", "date_" + current_date,
                                       OHLCTableDescription,
                                       expectedrows=len(rows),
                                       createparents=True)
            table.append(rows)
            table.flush()
        if table is not None:
            table.flush()
    end = datetime.datetime.now()
    diff = (end - start).seconds
    logging.debug("finished it took %d." % (diff))
async def api_delete_item_v2(table, id, request):
    models = {'user': User, 'blog': Blog, 'comment': Comment}
    check_user(request.__user__)
    item = await models[table].find(id)
    if item:
        await item.remove()
    else:
        logging.warn('id: %s not exist in %s' % (id, table))
    return dict(id=id)
async def save(self):
    args = list(map(self.getValueOrDefault, self.__mappings__))
    rows = await execute(self.__insert__, args)
    if rows != 1:
        logging.warn('failed to insert record: affected rows: %s' % rows)
async def update(self):
    args = list(map(self.get, self.__fields__))
    rows = await execute(self.__update__, args)
    if rows != 1:
        logging.warn('failed to update by primary key: affected rows: %s' % rows)
async def remove(self):
    args = [self.get(self.__primary_key__)]
    rows = await execute(self.__delete__, args)
    if rows != 1:
        logging.warn('failed to remove by primary key: affected rows: %s' % rows)
async def api_delete_item(table, id, request):
    models = {'users': User, 'blogs': Blog, 'comments': Comment, 'oauth': Oauth}
    check_user(request.__user__)
    item = await models[table].find(id)
    if item:
        await item.remove()
    else:
        logging.warn('id: %s not exist in %s' % (id, table))
    return dict(id=id)
def send_data(self, data):
    """Wrap data in framer's header and send it"""
    packet = bytearray()
    packet.extend(struct.pack('>I', len(data)))
    packet.extend(data)
    try:
        self._transport.write(packet)
    except Exception as exc:
        # logging.warn("Conn: {} Exception when sending: {}"
        #              .format(self._connection_id, e))
        logging.exception('Conn: %s Exception while sending',
                          self._connection_id, exc_info=exc)
        self.remove_all_members()
def pause_writing(self):
    logging.warn("PEER PROTOCOL IS OVER THE HIGH-WATER MARK")
    self._throttle = True
def resume_writing(self):
    logging.warn("PEER PROTOCOL IS DRAINED BELOW THE HIGH-WATER MARK")
    self._throttle = False
def pause_writing(self):
    logging.warn("UDP SOCKET OVER HIGH-WATER MARK")