The following 50 code examples, extracted from open source Python projects, illustrate how to use operator.itemgetter().
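Before the project examples, here is a minimal self-contained sketch (the records and counts data below are invented purely for illustration) of the two itemgetter() patterns that recur throughout this list: sorting a sequence of records by one field, and picking the key with the largest value from a dict.

from operator import itemgetter

# Hypothetical sample data, used only to demonstrate the pattern.
records = [
    {"name": "b", "score": 3},
    {"name": "a", "score": 7},
    {"name": "c", "score": 5},
]

# itemgetter("score") builds a callable equivalent to lambda r: r["score"],
# so it can be passed directly as a sort key.
by_score = sorted(records, key=itemgetter("score"), reverse=True)
print([r["name"] for r in by_score])   # ['a', 'c', 'b']

# The same idea picks the (key, value) pair with the largest value from a dict.
counts = {"x": 2, "y": 9, "z": 4}
best_key = max(counts.items(), key=itemgetter(1))[0]
print(best_key)                        # 'y'
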
def update_bibs_in(grouped_bibs, db_abbrev):
    actions = {
        "y": lambda items: [update_in(bibs, db_abbrev) for bibs in items],
        "m": lambda items: [manual_update_in(bibs, db_abbrev) for bibs in items],
        "n": lambda items: items
    }
    print("\n ")
    action = input("Abbreviate everything?" +
                   "y(yes, automatic)/m(manual)/n(do nothing)")

    grouped_bibs.sort(key=operator.itemgetter('journal'))

    grouped_by_journal = []
    for key, items in groupby(grouped_bibs, lambda i: i["journal"]):
        grouped_by_journal.append(list(items))

    if action in ("y", "m", "n"):
        updated_bibs = actions.get(action)(grouped_by_journal)
    else:
        return update_bibs_in(grouped_bibs, db_abbrev)

    updated_bibs = reduce(lambda a, b: a + b, updated_bibs)

    return updated_bibs

def create_pie_chart(input_dict, input_colors, suffix, special_item_key=None):
    if special_item_key != None:
        special_item = dict()
        special_item[special_item_key] = 0
    output_text = u'{{#invoke:Chart|pie chart\n' \
                  u'| radius = 180\n' \
                  u'| slices = \n'
    input_dict = dict(input_dict)
    sorted_dict = OrderedDict(sorted(input_dict.items(), key=itemgetter(1), reverse=True))
    for key, value in sorted_dict.iteritems():
        if special_item_key == None or key != special_item_key:
            output_text += u' ( %d: %s : %s)\n' % (value, key, input_colors[key])
        else:
            special_item[special_item_key] = value
    if special_item_key != None:
        output_text += u' ( %d: %s : %s)\n' % (special_item[special_item_key],
                                               special_item_key,
                                               input_colors[special_item_key])
    output_text += u'| units suffix = _%s\n' \
                   u'| percent = true\n' \
                   u'}}\n' % (suffix)
    return output_text

def smooth_emotions(self, prediction):
    emotions = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
    emotion_values = {'Angry': 0.0, 'Disgust': 0.0, 'Fear': 0.0, 'Happy': 0.0,
                      'Sad': 0.0, 'Surprise': 0.0, 'Neutral': 0.0}

    emotion_probability, emotion_index = max((val, idx) for (idx, val) in enumerate(prediction[0]))
    emotion = emotions[emotion_index]

    # Append the new emotion and if the max length is reached pop the oldest value out
    self.emotion_queue.appendleft((emotion_probability, emotion))

    # Iterate through each emotion in the queue and create an average of the emotions
    for pair in self.emotion_queue:
        emotion_values[pair[1]] += pair[0]

    # Select the current emotion based on the one that has the highest value
    average_emotion = max(emotion_values.iteritems(), key=operator.itemgetter(1))[0]

    return average_emotion

def _main():
    # Main program for testing.
    import os
    mod = sys.argv[1]
    if os.path.exists(mod):
        path = [os.path.dirname(mod)]
        mod = os.path.basename(mod)
        if mod.lower().endswith(".py"):
            mod = mod[:-3]
    else:
        path = []
    dict = readmodule_ex(mod, path)
    objs = dict.values()
    objs.sort(lambda a, b: cmp(getattr(a, 'lineno', 0),
                               getattr(b, 'lineno', 0)))
    for obj in objs:
        if isinstance(obj, Class):
            print "class", obj.name, obj.super, obj.lineno
            methods = sorted(obj.methods.iteritems(), key=itemgetter(1))
            for name, lineno in methods:
                if name != "__path__":
                    print " def", name, lineno
        elif isinstance(obj, Function):
            print "def", obj.name, obj.lineno

def selectProbes(prList):
    retIDList = []
    distances = []
    if len(prList) == 1:
        id, lat, lon = prList[0]
        retIDList.append(id)
    else:
        for iter in range(0, len(prList) - 1):
            id, lat, lon = prList[iter]
            for iter2 in range(iter + 1, len(prList)):
                id2, lat2, lon2 = prList[iter2]
                dist = haversine(lon, lat, lon2, lat2)
                distances.append([id, id2, dist])
                #retIDList.append(id)
        sortedDistances = sorted(distances, key=itemgetter(2), reverse=True)
        ID1, ID2, dist = sortedDistances[0]  # Top one
        retIDList = [ID1, ID2]
    return retIDList

def __call__(self, getter, *args, **kwargs):
    size = tf.TensorShape(kwargs['shape']).num_elements()
    if size < self.small_variable_size_threshold:
        device_name = self.device_for_small_variables
    else:
        device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
        device_name = self.devices[device_index]
        self.sizes[device_index] += size

    kwargs['caching_device'] = device_name
    var = getter(*args, **kwargs)
    return var


# To be used with custom_getter on tf.get_variable. Ensures the created variable
# is in LOCAL_VARIABLES and not GLOBAL_VARIABLES collection.

def index(pattern=None):
    """Renders index.html page with a list of benchmarks."""
    filter_regex = None
    if pattern:
        filter_regex = re.compile(urllib.parse.unquote(pattern))
    min_time_to_lookup = datetime.now() - timedelta(days=_MAX_DAYS_WITHOUT_RUN)

    client = datastore.Client()
    query = client.query(kind='Test')
    query.add_filter('start', '>', min_time_to_lookup)
    fetched = list(query.fetch())

    test_names = {}  # maps test name to encoded test name
    for fetched_result in fetched:
        if fetched_result['test'] in test_names:
            continue  # already added
        if not filter_regex or re.search(pattern, fetched_result['test']):
            test_names[fetched_result['test']] = urllib.parse.quote(
                fetched_result['test'], safe='')

    # convert test_names to list and sort
    test_names = sorted(test_names.items(), key=itemgetter(1), reverse=True)
    return render_template('index.html', tests=test_names)

def cluster(self, data_set, threshold, verbose=True):
    prev_map = {}
    grp_id = 0
    for index in range(len(data_set)):
        sample = data_set[index]
        if not verbose:
            print "[+] Processing Sample:", sample["id"]
        scores = {prev["id"]: Util.simscore(sample["encoded"], prev["encoded"])
                  for prev in data_set[:index]}
        if len(scores) > 0 and max(scores.values()) > threshold:
            closest = max(scores.iteritems(), key=operator.itemgetter(1))[0]
            if not verbose:
                print "[+] Found Closest Cluster:", closest
            cur_grp_id = prev_map[closest]
        else:
            grp_id += 1
            cur_grp_id = grp_id
        prev_map[sample["id"]] = cur_grp_id

    grp_info = {}
    for sid, gid in prev_map.iteritems():
        if gid not in grp_info:
            grp_info[gid] = []
        grp_info[gid].append(sid)
    return grp_info

def cluster(self, data_set, threshold, verbose=True):
    grp_map = {}
    grp_id = 0
    for index in range(len(data_set)):
        sample = data_set[index]
        if not verbose:
            print "[+] Processing Sample:", sample["id"]
        scores = {}
        for prev_grp_id, prev_grp_data in grp_map.iteritems():
            scores[prev_grp_id] = min([Util.simscore(sample["encoded"], prev["encoded"])
                                       for prev in prev_grp_data])
        if len(scores) == 0 or max(scores.values()) < threshold:
            grp_id += 1
            cur_grp_id = grp_id
            grp_map[cur_grp_id] = []
        else:
            cur_grp_id = max(scores.iteritems(), key=operator.itemgetter(1))[0]
            if not verbose:
                print "[+] Found Closest Cluster:", cur_grp_id
        grp_map[cur_grp_id].append(sample)

    grp_info = {}
    for prev_grp_id, prev_grp_data in grp_map.iteritems():
        grp_info[prev_grp_id] = [prev["id"] for prev in prev_grp_data]
    return grp_info

def _do_grouping(self):
    """Group the dataframe
    """
    # First, do groupby on the first key by sorting on the first key.
    # This will sort & shuffle the partitions.
    firstkey = self._by[0]
    df = self._df.sort_value(firstkey)
    groups = df.to_delayed()

    # Second, do groupby internally for each partition.
    @delayed
    def _groupby(df, by):
        grouped = df.groupby(by=by)
        ovdata = _extract_data_to_check_group_overlap(grouped, by)
        return grouped, ovdata

    grouped = [_groupby(g, self._by) for g in groups]
    # Persist the groupby operation to avoid duplicating the work
    grouped = persist(*grouped)
    # Get the groupby objects
    outgroups = list(map(delayed(operator.itemgetter(0)), grouped))
    _check_group_non_overlap_assumption(grouped)
    return outgroups

def _targets_heist(self, ctx):
    """Shows a list of targets"""
    server = ctx.message.server
    settings = self.check_server_settings(server)
    t_vault = settings["Theme"]["Vault"]

    if len(settings["Targets"].keys()) == 0:
        msg = ("There aren't any targets! To create a target use {}heist "
               "createtarget .".format(ctx.prefix))
    else:
        target_names = [x for x in settings["Targets"]]
        crews = [int(subdict["Crew"]) for subdict in settings["Targets"].values()]
        success = [str(subdict["Success"]) + "%" for subdict in settings["Targets"].values()]
        vaults = [subdict["Vault"] for subdict in settings["Targets"].values()]
        data = list(zip(target_names, crews, vaults, success))
        table_data = sorted(data, key=itemgetter(1), reverse=True)
        table = tabulate(table_data, headers=["Target", "Max Crew", t_vault, "Success Rate"])
        msg = "```C\n{}```".format(table)

    await self.bot.say(msg)

def segments(self, precision=0):
    if max(self.rx, self.ry) < precision:
        return [[self.center]]

    p = [(0, self.P(0)), (1, self.P(1))]
    d = 2 * max(self.rx, self.ry)

    while d > precision:
        for (t1, p1), (t2, p2) in zip(p[:-1], p[1:]):
            t = t1 + (t2 - t1) / 2.
            d = Segment(p1, p2).pdistance(self.P(t))
            p.append((t, self.P(t)))
        p.sort(key=operator.itemgetter(0))

    ret = [x for t, x in p]
    return [ret]

def simplify_segment(segment, epsilon):
    """Ramer-Douglas-Peucker algorithm"""

    if len(segment) < 3 or epsilon <= 0:
        return segment[:]

    l = Segment(segment[0], segment[-1])  # Longest segment

    # Find the furthest point from the segment
    index, maxDist = max([(i, l.pdistance(p)) for i, p in enumerate(segment)],
                         key=operator.itemgetter(1))

    if maxDist > epsilon:
        # Recursively call with the segment split in 2 on its furthest point
        r1 = simplify_segment(segment[:index + 1], epsilon)
        r2 = simplify_segment(segment[index:], epsilon)

        # Remove redundant 'middle' Point
        return r1[:-1] + r2
    else:
        return [segment[0], segment[-1]]

def test_model_read_as_dict(self):
    """
    Tests that columns of an instance can be read as a dict.
    """
    tm = TestModel.create(count=8, text='123456789', a_bool=True)
    column_dict = {
        'id': tm.id,
        'count': tm.count,
        'text': tm.text,
        'a_bool': tm.a_bool,
    }
    self.assertEqual(sorted(tm.keys()), sorted(column_dict.keys()))
    self.assertSetEqual(set(tm.values()), set(column_dict.values()))
    self.assertEqual(
        sorted(tm.items(), key=itemgetter(0)),
        sorted(column_dict.items(), key=itemgetter(0)))
    self.assertEqual(len(tm), len(column_dict))
    for column_id in column_dict.keys():
        self.assertEqual(tm[column_id], column_dict[column_id])

    tm['count'] = 6
    self.assertEqual(tm.count, 6)

def ranking(score):
    """Method to convert a list of scores into ranks."""
    data = []
    for i in range(len(score)):
        data.append([score[i], i])
    data = sorted(data, key=operator.itemgetter(0), reverse=False)
    value = data[0][0]
    data[0][0] = 1
    for i in range(1, len(score)):
        val = data[i][0]
        if val > value:
            value = val
            data[i][0] = (i + 1)
        else:
            data[i][0] = data[i - 1][0]
    data = sorted(data, key=operator.itemgetter(1), reverse=False)
    final_rank = []
    for i in range(len(score)):
        final_rank.append(data[i][0])
    return final_rank


# retrieve a specific column from a 2-dimensional array as a 1-dimensional array

def prune(self, min_freq=5, max_size=sys.maxsize):
    """returns new Vocab object, pruned based on minimum symbol frequency"""
    pruned_vocab = Vocab(unk=self.unk, emb=self.emb)
    cnt = 0
    for sym, freq in sorted(self.sym2freqs.items(), key=operator.itemgetter(1), reverse=True):
        # for sym in self.sym2freqs:
        #     freq = self.sym2freqs[sym]
        cnt += 1
        if freq >= min_freq and cnt < max_size:
            pruned_vocab(sym)
            pruned_vocab.sym2freqs[sym] = freq
    if self.frozen:
        # if original Vocab was frozen, freeze new one
        pruned_vocab.freeze()
    return pruned_vocab

def _manage_size(self):
    if not self._mutex.acquire(False):
        return
    try:
        while len(self) > self.capacity + self.capacity * self.threshold:
            by_counter = sorted(dict.values(self),
                                key=operator.itemgetter(2),
                                reverse=True)
            for item in by_counter[self.capacity:]:
                try:
                    del self[item[0]]
                except KeyError:
                    # deleted elsewhere; skip
                    continue
    finally:
        self._mutex.release()

def rolestat(self, message_object):
    server = message_object.server
    msg = "Role stats for this server (" + str(server.member_count) + " users in total):\n"
    roles = dict()
    for member in server.members:
        for member_role in member.roles:
            if member_role.name != "@everyone":
                if member_role.name in roles:
                    roles[member_role.name] += 1
                else:
                    roles[member_role.name] = 1
    sorted_x = sorted(roles.items(), key=operator.itemgetter(1))
    for role, count in reversed(sorted_x):
        msg += role + ": " + str(count) + " users\n"
    await self.pm.clientWrap.send_message(self.name, message_object.channel, msg)

def predict_dt():
    # Python 2 tuple-unpacking lambdas: map over (key, value) pairs from zipWithIndex()
    data_with_idx = data_dt.zipWithIndex().map(lambda (k, v): (v, k))
    test = data_with_idx.sample(False, 0.2, 42)
    train = data_with_idx.subtractByKey(test)
    test_data = test.map(lambda (idx, p): p)
    train_data = train.map(lambda (idx, p): p)

    maxDepths = [1, 2, 3, 4, 5, 10, 20]
    maxBins = [2, 4, 8, 16, 32, 64, 100]
    m = {}
    for maxDepth in maxDepths:
        for maxBin in maxBins:
            metrics = evaluate_dt(train_data, test_data, maxDepth, maxBin)
            print("metrics in maxDepth: %d; maxBins: %d" % (maxDepth, maxBin))
            print(metrics)
            m["maxDepth:%d;maxBins:%d" % (maxDepth, maxBin)] = metrics[2]
    mSort = sorted(m.iteritems(), key=operator.itemgetter(1), reverse=True)
    print(mSort)

def ignored(self, ctx):
    """Lists the users currently being ignored."""
    ignoreArray = self.settings.getServerStat(ctx.message.server, "IgnoredUsers")

    # rows_by_lfname = sorted(rows, key=itemgetter('lname','fname'))

    promoSorted = sorted(ignoreArray, key=itemgetter('Name'))

    if not len(promoSorted):
        msg = 'I\'m not currently ignoring anyone.'
        await self.bot.send_message(ctx.message.channel, msg)
        return

    roleText = "Currently Ignored Users:\n"
    for arole in promoSorted:
        for role in ctx.message.server.members:
            if role.id == arole["ID"]:
                # Found the role ID
                roleText = '{}*{}*\n'.format(roleText, DisplayName.name(role))

    await self.bot.send_message(ctx.message.channel, roleText)

def classify0(inX, dataSet, labels, k):
    # distance calculation
    dataSetSize = dataSet.shape[0]
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances ** 0.5
    sortedDistIndicies = distances.argsort()
    # vote with the k nearest neighbors
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # sort the votes in descending order
    sortedClassCount = sorted(classCount.iteritems(),
                              key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]

def address_count(trace):
    """
    Count the different occurrences of the addresses in the trace and return a
    sorted (highest->lowest) occurrence list
    :param trace: execution trace of the binary
    :return: sorted list starting with the highest address count and ending with the lowest
    """
    trace = [line.addr for line in trace]
    analysis_result = {}

    for addr in trace:
        # for heuristic analysis the count of address
        count = trace.count(addr)
        if addr not in analysis_result.keys():
            analysis_result[addr] = count

    # sort the analysis result by most common addresses
    sorted_result = sorted(analysis_result.items(), key=operator.itemgetter(1))
    sorted_result.reverse()

    return sorted_result

def setUpClass(cls):
    # Read config.ini file
    current_dir = os.path.dirname(os.path.realpath(__file__))
    config_path = os.path.join(current_dir, 'config.ini')
    confparser = ConfigParser()
    with open(config_path, "r") as config_file:
        confparser.read_file(config_file)
    cls.url = confparser['test']['url']
    cls.username = confparser['test']['username']
    cls.password = confparser['test']['password']

    cls.api = LimeSurvey(
        url=cls.url,
        username=cls.username)
    cls.session_key = None

    cls.api.open(password=cls.password)

    surveys = sorted(cls.api.survey.list_surveys(), key=itemgetter("sid"))
    cls.survey_id = surveys[0].get("sid")
    cls.survey_id_invalid = -1

def get_stats(project_id, geo=False, period='2 week'):
    """Return the stats of a given project."""
    hours, hours_anon, hours_auth, max_hours, \
        max_hours_anon, max_hours_auth = stats_hours(project_id, period)
    users, anon_users, auth_users = stats_users(project_id, period)
    dates, dates_anon, dates_auth = stats_dates(project_id, period)

    n_tasks(project_id)

    sum(dates.values())

    sorted(dates.iteritems(), key=operator.itemgetter(0))

    dates_stats = stats_format_dates(project_id, dates, dates_anon, dates_auth)

    hours_stats = stats_format_hours(project_id, hours, hours_anon, hours_auth,
                                     max_hours, max_hours_anon, max_hours_auth)

    users_stats = stats_format_users(project_id, users, anon_users, auth_users, geo)

    return dates_stats, hours_stats, users_stats

def knapsack(items, capacity):
    table = [[0 for w in range(capacity + 1)] for j in xrange(len(items) + 1)]

    for j in xrange(1, len(items) + 1):
        item, wt, val = items[j - 1]
        for w in xrange(1, capacity + 1):
            if wt > w:
                table[j][w] = table[j - 1][w]
            else:
                table[j][w] = max(table[j - 1][w],
                                  table[j - 1][w - wt] + val)

    result = []
    w = capacity
    for j in range(len(items), 0, -1):
        was_added = table[j][w] != table[j - 1][w]
        if was_added:
            item, wt, val = items[j - 1]
            result.append(items[j - 1])
            w -= wt

    return result, sum(map(itemgetter(2), result))

def barGraph(data_count):
    names, count_in = [], []
    data_count = sorted(data_count.items(), key=operator.itemgetter(1), reverse=True)
    for i in data_count:
        names.append(i[0])
        count_in.append(i[-1])
    plt.rcdefaults()
    fig, ax = plt.subplots()
    y_pos = np.arange(len(names))
    ax.barh(y_pos, count_in, align='center', color='green', ecolor='black')
    ax.set_yticks(y_pos)
    ax.set_yticklabels(names)
    ax.invert_yaxis()  # labels read top-to-bottom
    ax.set_xlabel('Categories')
    ax.set_title('# of job titles in each category')
    plt.show()

def knn(self, test_X=[], k=3):
    size = self.train_X.shape[0]

    # Euclidean distance
    diff = tile(test_X, (size, 1)) - self.train_X
    dist_pow2 = diff ** 2
    dist_sum = dist_pow2.sum(axis=1)
    dist_sqrt = dist_sum ** 0.5
    dist = dist_sqrt.argsort()

    # vote for neighbors
    class_count = {}
    for i in range(k):
        vote_label = self.train_Y[dist[i]]
        class_count[vote_label] = class_count.get(vote_label, 0) + 1

    sorts = sorted(class_count.iteritems(), key=operator.itemgetter(1), reverse=True)
    return sorts[0][0]

async def draw(self, layer, timestamp):
    """
    Draw the next layer
    """
    # Yield until the queue becomes active
    events = await self.get_input_events()

    if len(events) > 0:
        self._process_events(events)

        # paint circles in descending timestamp order (oldest first)
        events = sorted(events, key=operator.itemgetter(0), reverse=True)

        for event in events:
            distance = 1.0 - event.percent_complete
            if distance < 1.0:
                radius = self._max_distance * distance
                self._draw_circles(layer, radius, event)

        return True

    return False

def rank_phases(phases, weights, thresholds):
    values = {}
    scores = []
    for attribute, weight in weights.items():
        values[attribute] = [getattr(phase, attribute) for phase in phases]
    for phase in phases:
        scores.append((sum(weight * score([thresholds[attribute]] + values[attribute],
                                          getattr(phase, attribute))
                           for attribute, weight in weights.items()) / sum(weights.values()),
                       phase))
    ranked_phases = []
    for rank, phase in sorted(scores, key=itemgetter(0), reverse=True):
        phase.attributes['rank'] = rank
        ranked_phases.append(phase)
    return ranked_phases

def attempt_naive_pov(self):
    p1 = self._find_naive_leaks()
    p2 = self._find_naive_leaks()

    leaked = dict()
    for si in p1:
        if si in p2:
            li = list(set(p2[si]).intersection(set(p1[si])))
            if len(li) > 0:
                for lb in li:
                    leaked[lb] = si

    # find four contiguous
    consecutive_groups = [ ]
    for _, g in groupby(enumerate(sorted(leaked)), lambda (i, x): i - x):
        consecutive_groups.append(map(itemgetter(1), g))

def get_largest_consecutive(self):
    # extra work here because we need to be confident about the bytes
    ss = self.state.copy()
    ss.add_constraints(self.minimized_ast == ss.se.BVV(ss.se.eval(self.minimized_ast, cast_to=str)))

    leaked_bytes = [ ]
    for byte in self.possibly_leaked_bytes:
        if self._confident_byte(ss, byte):
            leaked_bytes.append(byte)

    leaked_bytes = sorted(set(leaked_bytes))

    consec_bytes = [ ]
    # find consecutive leaked bytes
    for _, g in groupby(enumerate(leaked_bytes), lambda (i, x): i - x):
        consec_bytes.append(map(itemgetter(1), g))

def get_vir_network_dhcp_lease(conn, vm_name):
    """Libvirt since 1.2.6 version provides DHCPLeases method in virNetwork.
    That's the current official way for getting DHCP leases and this
    information isn't stored anywhere else anymore.
    """
    network = conn.networkLookupByName('vagrant-private-dhcp')
    dhcp_leases = libvirt.virNetwork.DHCPLeases(network)
    vm_dhcp_leases = filter(lambda lease: lease['hostname'] == vm_name, dhcp_leases)
    newest_vm_dhcp_lease = sorted(vm_dhcp_leases,
                                  key=operator.itemgetter('expirytime'),
                                  reverse=True)[0]['ipaddr']
    return newest_vm_dhcp_lease

def sort_xy(x, y):
    '''Sorts a pair of x and y iterables, returning arrays in order of ascending x.

    Args:
        x (`iterable`): a list, numpy ndarray, or other iterable to sort by.

        y (`iterable`): a list, numpy ndarray, or other iterable that is y=f(x).

    Returns:
        tuple containing:

            `iterable`: an iterable containing the sorted x elements.

            `iterable`: an iterable containing the sorted y elements.

    '''
    # zip x and y, sort by the 0th element (x) of each tuple in zip()
    _ = sorted(zip(x, y), key=itemgetter(0))
    sorted_x, sorted_y = zip(*_)
    return sorted_x, sorted_y

def print(self, f, format='counts'):
    for key, counts in self.count_dict.items():
        if (self.source_tf[key] >= self.source_tf_filter) and \
           (self.source_df[key] / float(self.count_docs) <= self.source_df_filter):
            candidates = [(v, c) for v, c in counts.items() if not self._filtered_trans(v)]
            candidates = sorted(candidates, key=itemgetter(1), reverse=True)
        elif len(self.source_tf) == 0:
            # no tf/df counts - dictionary read from file
            candidates = sorted(counts.items(), key=itemgetter(1), reverse=True)
        else:
            continue
        if self.top_n:
            candidates = candidates[:self.top_n]
        if candidates:
            if format == 'counts':
                f.write(u'%s\t%s\n' % (key, ' '.join([self._format(v, c) for v, c in candidates])))
            elif format == 'solr':
                f.write(u'%s => %s\n' % (key, candidates[0][0]))

def historigram(filename):
    values = {}
    mostpixels = []
    im = Image.open(filename)
    im = im.convert("P")
    his = im.histogram()
    for i in range(256):
        values[i] = his[i]
    print('Id ' + 'Number of pixels')
    for j, k in sorted(values.items(), key=itemgetter(1), reverse=True)[:10]:
        print(j, k)
        mostpixels.append([j, k])
    return mostpixels

def write_snp_summary(self, file="snp_summary.csv", summary_parameters=None, sort=False):
    if summary_parameters is None:
        summary_parameters = ["maf", "hwe", "rep", "call_rate"]

    out_file = os.path.join(self.out_path, self.attributes["project"] + "_" + file)

    out_data = [["id"] + summary_parameters]

    snps = [[snp] + [data[parameter] for parameter in summary_parameters]
            for snp, data in self.data.items()]

    if sort:
        snps = sorted(snps,
                      key=operator.itemgetter(*[i for i in range(1, len(summary_parameters) + 1)]),
                      reverse=True)

    out_data += snps

    with open(out_file, "w") as snp_summary:
        writer = csv.writer(snp_summary)
        writer.writerows(out_data)

def _compare_entries(self, ids, selector="maf", selector_list=None):
    """
    Gets data from dictionary for each duplicate SNP according to 'selector'
    and returns the allele identification of the best entry.

    Selector list currently sorts descending, that is all selector values must
    be ranked highest value ("best") - this is the case for MAF, Call Rate,
    Rep, Read Counts ...

    Later rank the data by QC Score.
    """
    if selector_list is None:
        entries_stats = [[i, self.data[i][selector]] for i in ids]
        entries_ranked = sorted(entries_stats, key=operator.itemgetter(1), reverse=True)
    else:
        entries_stats = [[i] + [self.data[i][selector] for selector in selector_list] for i in ids]
        entries_ranked = sorted(entries_stats,
                                key=operator.itemgetter(*[i for i in range(1, len(selector_list) + 1)]),
                                reverse=True)

    return entries_ranked[0][0]

def unique_for_country_code(self, country_code):
    shipping = self.filter(
        Q(country_code=country_code) |
        Q(country_code=ANY_COUNTRY))
    shipping = shipping.order_by('shipping_method_id')
    shipping = shipping.values_list('shipping_method_id', 'id', 'country_code')
    grouped_shipping = groupby(shipping, itemgetter(0))
    any_country = ANY_COUNTRY

    ids = []

    for shipping_method_id, method_values in grouped_shipping:
        method_values = list(method_values)
        # if there is any country choice and a specific one, remove the any country choice
        if len(method_values) == 2:
            method = [val for val in method_values
                      if val[2] != any_country][0]
        else:
            method = method_values[0]
        ids.append(method[1])
    return self.filter(id__in=ids)

def sortby(self, name_or_index):
    name, index = None, None
    if isinstance(name_or_index, int):
        index = name_or_index
    else:
        name = name_or_index

    if name is not None:
        try:
            colnum = self._colnames.index(name)
        except ValueError:
            raise ValueError('column {} not in {}'.format(name, self._colnames))
    else:
        if index < 0 or index >= self._width:
            raise ValueError('index out of range 0..{:d}'.format(self._width - 1))
        colnum = index

    self._rows.sort(key=itemgetter(colnum))

def sort_rows(self, rows, section):
    """Sort the rows, as appropriate for the section.

    Args:
        rows(list): List of tuples (all same length, same values in each position)
        section(str): Name of section, should match const in Differ class
    """
    #print("@@ SORT ROWS:\n{}".format(rows))
    # Section-specific determination of sort key
    if section.lower() == Differ.CHANGED.lower():
        sort_key = Differ.CHANGED_DELTA
    else:
        sort_key = None
    if sort_key is not None:
        rows.sort(key=itemgetter(sort_key))

def clean_oldbackups(self):
    '''auto clean old backups'''
    backuppath = self.addon.getSetting("backup_path").decode("utf-8")
    max_backups = self.addon.getSetting("max_old_backups")
    if max_backups:
        max_backups = int(max_backups)
        all_files = []
        for filename in xbmcvfs.listdir(backuppath)[1]:
            if ".zip" in filename and "Skinbackup" in filename:
                filename = filename.decode("utf-8")
                filepath = backuppath + filename
                filestat = xbmcvfs.Stat(filepath)
                modified = filestat.st_mtime()
                del filestat
                log_msg(modified)
                all_files.append((filepath, modified))
        if len(all_files) > max_backups:
            from operator import itemgetter
            old_files = sorted(all_files, key=itemgetter(1), reverse=True)[max_backups - 1:]
            for backupfile in old_files:
                delete_file(backupfile[0])

def restrict(self, support, indices=False):
    """Restrict the features to those in support using feature selection.

    This function modifies the estimator in-place.
    """
    if not indices:
        support = np.where(support)[0]

    names = self.feature_names_
    new_vocab = {}
    for i in support:
        new_vocab[names[i]] = len(new_vocab)

    self.vocabulary_ = new_vocab
    self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
                                                key=itemgetter(1))]

    return self

def get_server_list(sort_by_load=False, sort_by_country=False):
    try:
        resp = requests.get(API_ADDR + '/server', timeout=TIMEOUT)
        if resp.status_code == requests.codes.ok:
            server_list = resp.json()
            if sort_by_load:
                return sorted(server_list, key=itemgetter('load'))
            elif sort_by_country:
                return sorted(server_list, key=itemgetter('country'))
            else:
                return server_list
        else:
            return None
    except Exception as ex:
        return None

def rank(self):
    """Rank the URLs. A tuple is returned with (url, #occur)
    in decreasing order of occurrences
    """
    occurs = []

    for url in self.urls:
        data = requests.get(url).content
        words = map(lambda x: x.lower().strip(), data.split())
        # Filter empty words
        count = words.count(self.word)
        occurs.append((url, count))

    # Return in sorted order
    return sorted(occurs, key=operator.itemgetter(1), reverse=True)
