The following 50 code examples, extracted from open-source Python projects, illustrate how pdb.set_trace() is used in practice.
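Before the project examples, here is a minimal sketch of the basic pattern they all share: call pdb.set_trace() at the point where execution should pause and drop into the interactive debugger. The running_total function and its input below are invented purely for illustration.

import pdb

def running_total(values):
    total = 0
    for v in values:
        pdb.set_trace()  # execution pauses here; inspect v and total, then type "c" to continue
        total += v
    return total

if __name__ == '__main__':
    print(running_total([1, 2, 3]))

At the (Pdb) prompt, p <expr> prints a value, n steps to the next line, and c resumes execution. The project examples follow.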
def rforest2(train, test, tunings=None, smoteit=True, duplicate=True):
    "RF "
    # Apply a random forest regressor to predict the number of bugs.
    if smoteit:
        train = SMOTE(train, atleast=50, atmost=101, resample=duplicate)
    if not tunings:
        clf = RandomForestRegressor(n_estimators=100, random_state=1)
    else:
        clf = RandomForestRegressor(n_estimators=int(tunings[0]),
                                    max_features=tunings[1] / 100,
                                    min_samples_leaf=int(tunings[2]),
                                    min_samples_split=int(tunings[3]))
    train_DF = formatData(train)
    test_DF = formatData(test)
    features = train_DF.columns[:-2]
    klass = train_DF[train_DF.columns[-2]]
    # set_trace()
    clf.fit(train_DF[features], klass)
    preds = clf.predict(test_DF[test_DF.columns[:-2]])
    return preds
def reshape_array(array, newsize, pixcombine='sum'):
    """
    Reshape an array to a given size using either the sum, mean or median of the binned pixels.
    Note that the old array dimensions have to be multiples of the new array dimensions.

    --- INPUT ---
    array        Array to reshape (combine pixels)
    newsize      New size of array
    pixcombine   The method to combine the pixels with. Choices are sum, mean and median
    """
    sh = newsize[0], array.shape[0] // newsize[0], newsize[1], array.shape[1] // newsize[1]
    pdb.set_trace()
    if pixcombine == 'sum':
        reshapedarray = array.reshape(sh).sum(-1).sum(1)
    elif pixcombine == 'mean':
        reshapedarray = array.reshape(sh).mean(-1).mean(1)
    elif pixcombine == 'median':
        # ndarrays have no .median() method; use np.median instead (assumes numpy is imported as np)
        reshapedarray = np.median(np.median(array.reshape(sh), axis=-1), axis=1)
    else:
        raise ValueError("pixcombine must be 'sum', 'mean' or 'median'")

    return reshapedarray
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def record_moves_for_robots(state, debug_robots, interactive):
    import roborally.api as api
    for cell, pos in state.board.traverse():
        if cell.content and cell.content[TYPE] == ROBOT:
            robot = cell.content
            api.SIGHT = get_robot_sight(state, pos, robot)
            try:
                robot['move'] = robot['brain'].ai.move()
            except Exception as error:
                if debug_robots:
                    if debug_robots == 'interactive':
                        import pdb
                        pdb.set_trace()
                    raise error
                robot[LIFE] -= 1
                if robot[LIFE] == 0:
                    record_death(robot, 'overheating', interactive)
                robot['move'] = LASER
            if robot['move'] not in MOVES:
                robot['move'] = LASER
def setBreak(self, breakFlag=True):
    """Method to invoke the Python pdb debugger when this element is
       about to be parsed. Set C{breakFlag} to True to enable, False to
       disable.
    """
    if breakFlag:
        _parseMethod = self._parse
        def breaker(instring, loc, doActions=True, callPreParse=True):
            import pdb
            pdb.set_trace()
            return _parseMethod(instring, loc, doActions, callPreParse)
        breaker._originalParseMethod = _parseMethod
        self._parse = breaker
    else:
        if hasattr(self._parse, "_originalParseMethod"):
            self._parse = self._parse._originalParseMethod
    return self
def runReactorWithLogging(config, oldstdout, oldstderr):
    from twisted.internet import reactor
    try:
        if config['profile']:
            if not config['nothotshot']:
                runWithHotshot(reactor, config)
            else:
                runWithProfiler(reactor, config)
        elif config['debug']:
            sys.stdout = oldstdout
            sys.stderr = oldstderr
            if runtime.platformType == 'posix':
                signal.signal(signal.SIGUSR2, lambda *args: pdb.set_trace())
                signal.signal(signal.SIGINT, lambda *args: pdb.set_trace())
            fixPdb()
            pdb.runcall(reactor.run)
        else:
            reactor.run()
    except:
        if config['nodaemon']:
            file = oldstdout
        else:
            file = open("TWISTD-CRASH.log", 'a')
        traceback.print_exc(file=file)
        file.flush()
def pytest_configure(config):
    if config.getvalue("usepdb") or config.getvalue("usepdb_cls"):
        config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
    if config.getvalue("usepdb_cls"):
        modname, classname = config.getvalue("usepdb_cls").split(":")
        __import__(modname)
        pdb_cls = getattr(sys.modules[modname], classname)
    else:
        pdb_cls = pdb.Pdb
    pytestPDB._pdb_cls = pdb_cls

    old = (pdb.set_trace, pytestPDB._pluginmanager)

    def fin():
        pdb.set_trace, pytestPDB._pluginmanager = old
        pytestPDB._config = None
        pytestPDB._pdb_cls = pdb.Pdb

    pdb.set_trace = pytest.set_trace
    pytestPDB._pluginmanager = config.pluginmanager
    pytestPDB._config = config
    config._cleanup.append(fin)
def do():
    for (a, b, c) in walk('./'):
        if not '.svn' in a:
            if not a == './':
                for file in c:
                    if 'arff' in file:
                        print(a + '/' + file)
                        arff = loadarff(a + '/' + file)
                        tags = [aa for aa in arff[1]]
                        header = ['$' + h for h in tags[:-1]]
                        header.append('$>' + tags[-1])
                        # set_trace()
                        name = []
                        with open(a + '/' + re.sub('.arff|[ ]', '', file) + '.csv', 'w+') as csvfile:
                            writer = csv.writer(csvfile, delimiter=',', quotechar='|')
                            writer.writerow(header)
                            for body in arff[0].tolist():
                                writer.writerow(body)
                    # else:
                    #     print('arff' in file)
                    #     # set_trace()
def do():
    for (a, b, c) in walk('./'):
        if not '.svn' in a:
            if not a == './':
                for file in c:
                    if '.csv' in file:
                        print(a + '/' + file)
                        arff = pd.read_csv(a + '/' + file)
                        # set_trace()
                        tags = arff.columns.values
                        first3 = ['?' + h for h in tags[0:3]]
                        indep = ['$' + h for h in tags[3:-1]]
                        header = first3 + indep
                        header.append('?>' + tags[-1])
                        # set_trace()
                        # name = []
                        with open(a + '/' + file, 'w+') as csvfile:
                            writer = csv.writer(csvfile, delimiter=',', quotechar='|')
                            writer.writerow(header)
                            for body in arff.values:
                                writer.writerow(body)
                    # else:
                    #     print('arff' in file)
                    #     # set_trace()
def _test():
    tbl_loc = explore(dir='../Data/Seigmund/', name='Apache')
    tbl = csv2DF(tbl_loc)

    # Define Tree settings
    opt = Thing(
        min=1,
        maxLvL=10,
        infoPrune=0.5,
        klass=-1,
        prune=False,
        debug=True,
        verbose=True)

    # Build a tree
    tree = dtree(tbl, opt=opt)

    # Show the tree
    if opt.verbose:
        show(tree)

    # ----- Debug? -----
    if opt.debug:
        set_trace()
def rforest(train, test, tunings=None, smoteit=True, duplicate=True):
    "RF "
    # Apply a random forest classifier to predict the number of bugs.
    if smoteit:
        train = SMOTE(train, atleast=50, atmost=101, resample=duplicate)
    if not tunings:
        clf = RandomForestClassifier(n_estimators=100, random_state=1)
    else:
        clf = RandomForestClassifier(n_estimators=int(tunings[0]),
                                     max_features=tunings[1] / 100,
                                     min_samples_leaf=int(tunings[2]),
                                     min_samples_split=int(tunings[3]))
    train_DF = formatData(train)
    test_DF = formatData(test)
    features = train_DF.columns[:-2]
    klass = train_DF[train_DF.columns[-2]]
    # set_trace()
    clf.fit(train_DF[features], klass)
    preds = clf.predict(test_DF[test_DF.columns[:-2]])
    return preds
def CART(train, test, tunings=None, smoteit=True, duplicate=True):
    " CART"
    # Apply a CART decision tree classifier to predict the number of bugs.
    if smoteit:
        train = SMOTE(train, atleast=50, atmost=101, resample=duplicate)
    if not tunings:
        clf = DecisionTreeClassifier()
    else:
        clf = DecisionTreeClassifier(max_depth=int(tunings[0]),
                                     min_samples_split=int(tunings[1]),
                                     min_samples_leaf=int(tunings[2]),
                                     max_features=float(tunings[3] / 100),
                                     max_leaf_nodes=int(tunings[4]),
                                     criterion='entropy')
    train_DF = formatData(train)
    test_DF = formatData(test)
    features = train_DF.columns[:-2]
    klass = train_DF[train_DF.columns[-2]]
    # set_trace()
    clf.fit(train_DF[features].astype('float32'), klass.astype('float32'))
    preds = clf.predict(test_DF[test_DF.columns[:-2]].astype('float32')).tolist()
    return preds
def __init__(self, train=None, test=None, test_DF=None, name='Apache',
             verbose=True, smoteit=False, majority=False, bin=False):
    self.train, self.test = train, test
    self.name = name
    try:
        self.train_DF = createTbl(train, _smote=smoteit, isBin=False)
    except:
        set_trace()
    if not test_DF:
        self.test_DF = createTbl(test, isBin=False)
    else:
        self.test_DF = test_DF
    self.ignore = [self.train_DF.headers[i].name[1:] for i in avoid(name=name)]
    self.verbose, self.smoteit = verbose, smoteit
    self.mod, self.keys = [], self.getKey()
    self.majority = majority
    t = discreteNums(
        createTbl(train, _smote=smoteit, isBin=bin),
        map(lambda x: x.cells, self.train_DF._rows))
    self.myTree = tdiv(t)
    # set_trace()
def get_rpid2pid(self, dump_date):
    print('get_rpid2pid...')
    title2id = self.get_title2id(dump_date)
    rpid2pid = {}
    regex = re.compile(r"\((\d+),0,'(.+?)','")
    fname = '/home/ddimitrov/data/enwiki20150304_plus_clickstream/enwiki-' + dump_date + '-redirect.sql.gz'
    with gzip.GzipFile(fname, 'rb') as infile:
        for line in infile:
            line = line.decode('utf-8')
            if not line.startswith('INSERT'):
                continue
            line = line.replace('NULL', "''")
            for pid, title in regex.findall(line):
                try:
                    rpid2pid[pid] = title2id[DataHandler.unescape_mysql(title)]
                except KeyError:
                    print(pid, title)
                    # pdb.set_trace()
    return rpid2pid
def relations(self, response):
    self.obj.get(response.url)
    followees_a = self.obj.find_elements_by_xpath('//a[@class="UserLink-link"]')
    # pdb.set_trace()
    # followees_a = response.xpath('//a[@class="UserLink-link"]/@href').extract()
    followees = []
    for one in followees_a:
        try:
            one = one.get_attribute('href')
            followees.append(one.replace('https://www.zhihu.com/people/', ''))
        except:
            pass
    followees = list(set(followees))
    # pdb.set_trace()
    response.meta['item']['relations_id'] += followees
    nextpage_button = response.xpath('//button[@class="Button PaginationButton PaginationButton-next Button--plain"]').extract()
    if nextpage_button:
        # pdb.set_trace()
        nextpage_url = response.url.replace('?page=' + str(response.meta['page']), '') + "?page=" + str(response.meta['page'] + 1)
        yield Request(nextpage_url, callback=self.relations,
                      meta={'page': response.meta['page'] + 1, 'item': response.meta['item']})
    else:
        yield response.meta['item']
        for user in followees:
            yield Request('https://www.zhihu.com/people/' + user + '/answers', callback=self.parse)
def parse_relation(self, response):
    json_result = str(response.body, encoding="utf8").replace('false', '0').replace('true', '1')
    dict_result = eval(json_result)
    relations_id = []
    for one in dict_result['data']:
        relations_id.append(one['url_token'])
    response.meta['item']['relations_id'] = relations_id
    if response.meta['offset'] == 0:
        response.meta['item']['relation_type'] = response.meta['relation_type']
    else:
        response.meta['item']['relation_type'] = 'next:' + response.meta['relation_type']
    # pdb.set_trace()
    yield response.meta['item']
    for one in response.meta['item']['relations_id']:
        yield Request('https://www.zhihu.com/api/v4/members/' + one + '?include=locations,employments,industry_category,gender,educations,business,follower_count,following_count,description,badge[?(type=best_answerer)].topics',
                      meta={'user_id': one}, callback=self.parse)
    # pdb.set_trace()
    if dict_result['paging']['is_end'] == 0:
        # pdb.set_trace()
        offset = response.meta['offset'] + 20
        next_page = re.findall(r'(.*offset=)\d+', response.url)[0]
        # pdb.set_trace()
        yield Request(next_page + str(offset), callback=self.parse_relation,
                      meta={'item': response.meta['item'], 'offset': offset,
                            'relation_type': response.meta['relation_type']})
def parse_article(self, response):
    json_result = str(response.body, encoding="utf8").replace('false', '0').replace('true', '1')
    dict_result = eval(json_result)
    for one in dict_result['data']:
        item = ArticleItem()
        item['author_id'] = response.meta['author_id']
        item['title'] = one['title']
        item['article_id'] = one['id']
        item['content'] = one['content']
        # pdb.set_trace()
        item['cretated_time'] = one['created']
        item['updated_time'] = one['updated']
        item['voteup_count'] = one['voteup_count']
        item['comment_count'] = one['comment_count']
        yield item
    if dict_result['paging']['is_end'] == 0:
        offset = response.meta['offset'] + 20
        next_page = re.findall(r'(.*offset=)\d+', response.url)[0]
        yield Request(next_page + str(offset), callback=self.parse_article,
                      meta={'author_id': response.meta['author_id'], 'offset': offset})
def process_response(self, request, response, spider):
    # pdb.set_trace()
    reason = response_status_message(response.status)
    if response.status in [300, 301, 302, 303]:
        pdb.set_trace()
        if reason == '301 Moved Permanently':
            return self._retry(request, reason, spider) or response  # ??
        else:
            raise IgnoreRequest
    elif response.status in [403, 414]:
        logger.error("%s! Stopping..." % response.status)
        os.system("pause")
        updateCookie(request.meta['accountText'], self.rconn, spider.name, request.cookies)
        return self._retry(request, reason, spider) or response  # ??
    else:
        return response
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
    """ CTOR for the device. """
    pdb.set_trace()
    if optional_args is None:
        optional_args = {}
    self.device = None
    self.hostname = hostname
    self.username = username
    self.password = password
    self.timeout = timeout
    self.port = optional_args.get('port', 22)
def get_annotation():
    annotation_id = request.form["annotation_id"]
    annotation = Annotation.query.filter(Annotation.id == annotation_id).first()
    # pdb.set_trace()
    if annotation.keywords_therapist == '':
        selected_therapist = ''
    else:
        selected_therapist = [str(WORD_TO_ID_THERAPIST[k]) for k in annotation.keywords_therapist.split(", ")]
    if annotation.keywords_child == '':
        selected_child = ''
    else:
        selected_child = [str(WORD_TO_ID_CHILD[k]) for k in annotation.keywords_child.split(", ")]

    return jsonify({
        "t_start": annotation.start_frame / FPS,
        "t_end": annotation.end_frame / FPS,
        "description": annotation.description,
        "selected_vocab_child": selected_child,
        "selected_vocab_therapist": selected_therapist,
        "description_type": annotation.description_type,
    })
def get_all_annotations():
    video_name = request.args["selected_video"]
    video = Video.query.filter(Video.name == video_name).first()
    user = current_user
    # pdb.set_trace()
    annotation_list = Annotation.query.filter(
        (Annotation.user_id == current_user.id) &
        (Annotation.video_id == video.id)).order_by(sqlalchemy.asc(Annotation.id)).all()
    return jsonify([
        {
            'selected_video': video.name,
            'time_start': row.start_frame / FPS,
            'time_end': row.end_frame / FPS,
            'select_vocab_child': row.keywords_child,
            'select_vocab_therapist': row.keywords_therapist,
            'description': row.description,
            'description_type': row.description_type,
            'ann_number': row.id,
        }
        for row in annotation_list
    ])
def main():
    pdb.set_trace()
    rw = RegularWall(1920, 1080, 2, 2, 100, 100, 150, 150)
    # pickle requires binary file mode in Python 3 ('wb'/'rb' rather than 'w'/'r')
    with open('data/rw.pickle', 'wb') as fh:
        pickle.dump(rw, fh)
    with open('data/rw.pickle', 'rb') as fh:
        rw2 = pickle.load(fh)
    print(rw2)

    rw.wall.save('data/regular_wall.pickle')
    w = Wall(0, 0)
    w.restore('data/regular_wall.pickle')
    print(w)
    sys.exit(0)

    pgui = ProjectionGUI(HDres, rw.wall)
    print("finished")

    p1 = Projection(HDres, rw.wall)
    p1.run_transforms()
    beach = cv2.imread('data/antigua_beaches-wallpaper-1920x1080.jpg')
    p1.render(beach, fname='data/beach-2x2-hd.png')

    rw = RegularWall(1920, 1080, 4, 7, 100, 100, 150, 150)
    p1 = Projection(HDres, rw.wall)
    p1.run_transforms()
    beach = cv2.imread('data/antigua_beaches-wallpaper-1920x1080.jpg')
    p1.render(beach, fname='data/beach-4x7-hd.png')
def _gen_dup_trinary_alloy(self, sp1, n1, sp2, n2):
    init_numbers = self.init_cell.numbers
    isp1 = sp1.Z
    isp2 = sp2.Z
    sp_ind_origin = [i for i, s in enumerate(init_numbers)]
    for sp1_comb_index in combinations(sp_ind_origin, n1):
        sp_ind_bin = [x for x in sp_ind_origin if x not in sp1_comb_index]
        for sp2_comb_index in combinations(sp_ind_bin, n2):
            numbers = init_numbers.copy()
            for i1, i2 in zip_longest(sp1_comb_index, sp2_comb_index):
                if i1 is not None:
                    numbers[i1] = isp1
                if i2 is not None:
                    numbers[i2] = isp2
            yield GeneralCell(self.lattice, self.positions, numbers)
            # pdb.set_trace()