The following 50 code examples, extracted from open source Python projects, illustrate how to use six.moves.map().
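For orientation, a minimal sketch (not taken from any of the projects below) showing the import and the lazy behavior that many of the examples account for by wrapping calls in list():

from six.moves import map

# six.moves.map resolves to itertools.imap on Python 2 and the builtin map on
# Python 3; both return a lazy iterator, so wrap it in list() to materialize.
lazy = map(lambda x: x * x, [1, 2, 3])
print(list(lazy))   # [1, 4, 9]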
def mapall(funcs, seq):
    """
    Parameters
    ----------
    funcs : iterable[function]
        Sequence of functions to map over `seq`.
    seq : iterable
        Sequence over which to map funcs.

    Yields
    ------
    elem : object
        Concatenated result of mapping each ``func`` over ``seq``.

    Example
    -------
    >>> list(mapall([lambda x: x + 1, lambda x: x - 1], [1, 2, 3]))
    [2, 3, 4, 0, 1, 2]
    """
    for func in funcs:
        for elem in seq:
            yield func(elem)
def query_shards(self, query):
    """
    Return the result of applying shard[query] for each shard in self.shards,
    as a sequence.

    If PARALLEL_SHARDS is set, the shards are queried in parallel, using
    the multiprocessing module.
    """
    # materialize the zip so len(args) works under Python 3
    args = list(zip([query] * len(self.shards), self.shards))
    if PARALLEL_SHARDS and PARALLEL_SHARDS > 1:
        logger.debug("spawning %i query processes" % PARALLEL_SHARDS)
        pool = multiprocessing.Pool(PARALLEL_SHARDS)
        # integer division: chunksize must be an int on Python 3
        result = pool.imap(query_shard, args,
                           chunksize=1 + len(args) // PARALLEL_SHARDS)
    else:
        # serial processing, one shard after another
        pool = None
        result = imap(query_shard, args)
    return pool, result
def __init__(self, search_path=None, platform=get_supported_platform(),
             python=PY_MAJOR):
    """Snapshot distributions available on a search path

    Any distributions found on `search_path` are added to the environment.
    `search_path` should be a sequence of ``sys.path`` items.  If not
    supplied, ``sys.path`` is used.

    `platform` is an optional string specifying the name of the platform
    that platform-specific distributions must be compatible with.  If
    unspecified, it defaults to the current platform.  `python` is an
    optional string naming the desired version of Python (e.g. ``'3.3'``);
    it defaults to the current version.

    You may explicitly set `platform` (and/or `python`) to ``None`` if you
    wish to map *all* distributions, not just those compatible with the
    running platform or Python version.
    """
    self._distmap = {}
    self.platform = platform
    self.python = python
    self.scan(search_path)
def parse_map(cls, data, dist=None):
    """Parse a map of entry point groups"""
    if isinstance(data, dict):
        data = data.items()
    else:
        data = split_sections(data)
    maps = {}
    for group, lines in data:
        if group is None:
            if not lines:
                continue
            raise ValueError("Entry points must be listed in groups")
        group = group.strip()
        if group in maps:
            raise ValueError("Duplicate group name", group)
        maps[group] = cls.parse_group(group, lines, dist)
    return maps
def install_namespaces(self):
    nsp = self._get_all_ns_packages()
    if not nsp:
        return
    filename, ext = os.path.splitext(self._get_target())
    filename += self.nspkg_ext
    self.outputs.append(filename)
    log.info("Installing %s", filename)
    lines = map(self._gen_nspkg_line, nsp)

    if self.dry_run:
        # always generate the lines, even in dry run
        list(lines)
        return

    with open(filename, 'wt') as f:
        f.writelines(lines)
def save(self):
    """Write changed .pth file back to disk"""
    if not self.dirty:
        return

    rel_paths = list(map(self.make_relative, self.paths))
    if rel_paths:
        log.debug("Saving %s", self.filename)
        lines = self._wrap_lines(rel_paths)
        data = '\n'.join(lines) + '\n'

        if os.path.islink(self.filename):
            os.unlink(self.filename)

        with open(self.filename, 'wt') as f:
            f.write(data)

    elif os.path.exists(self.filename):
        log.debug("Deleting empty %s", self.filename)
        os.unlink(self.filename)

    self.dirty = False
def run(self):
    aliases = self.distribution.get_option_dict('aliases')

    if not self.args:
        print("Command Aliases")
        print("---------------")
        for alias in aliases:
            print("setup.py alias", format_alias(alias, aliases))
        return

    elif len(self.args) == 1:
        alias, = self.args
        if self.remove:
            command = None
        elif alias in aliases:
            print("setup.py alias", format_alias(alias, aliases))
            return
        else:
            print("No alias definition found for %r" % alias)
            return
    else:
        alias = self.args[0]
        command = ' '.join(map(shquote, self.args[1:]))

    edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
def find_data_files(self, package, src_dir):
    """Return filenames for package's data files in 'src_dir'"""
    patterns = self._get_platform_patterns(
        self.package_data,
        package,
        src_dir,
    )
    globs_expanded = map(glob, patterns)
    # flatten the expanded globs into an iterable of matches
    globs_matches = itertools.chain.from_iterable(globs_expanded)
    glob_files = filter(os.path.isfile, globs_matches)
    files = itertools.chain(
        self.manifest_files.get(package, []),
        glob_files,
    )
    return self.exclude_data_files(package, src_dir, files)
def find_external_links(url, page):
    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""

    for match in REL.finditer(page):
        tag, rel = match.groups()
        rels = set(map(str.strip, rel.lower().split(',')))
        if 'homepage' in rels or 'download' in rels:
            for match in HREF.finditer(tag):
                yield urllib.parse.urljoin(url, htmldecode(match.group(1)))

    for tag in ("<th>Home Page", "<th>Download URL"):
        pos = page.find(tag)
        if pos != -1:
            match = HREF.search(page, pos)
            if match:
                yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
def __init__(s, poly_ring, *args):
    """
    coeffs = [x^0, x^1, x^2, ... degree's coefficient]
    """
    RingElement.__init__(s, poly_ring, args)
    if len(args) == 1:
        v = args[0]
        if isinstance(v, UnivariatePolynomialElement):
            s.coeffs = v.coeffs
        elif hasattr(v, "__iter__"):
            s.coeffs = list(v)
        else:
            s.coeffs = [v]
    else:
        s.coeffs = args
    s.trim()
    s.coeffs = list(map(s.ring.field, s.coeffs))
def __divmod__(s, rhs):
    assert rhs != 0
    if isinstance(rhs, UnivariatePolynomialElement):
        if len(rhs) == 1:
            q = UnivariatePolynomialElement(s.ring, map(lambda x: x / rhs[0], s.coeffs))
            r = UnivariatePolynomialElement(s.ring, map(lambda x: x % rhs[0], s.coeffs))
            return q, r
        q = UnivariatePolynomialElement(s.ring, 0)
        r = s
        d = rhs.degree()
        c = rhs[-1]
        while r.degree() >= d:
            t = UnivariatePolynomialElement(s.ring, [r[-1] / c]).shift(r.degree() - d)
            q = q + t
            r = r - t * rhs
        return q, r
    else:
        q = UnivariatePolynomialElement(s.ring, map(lambda x: x / rhs, s.coeffs))
        r = UnivariatePolynomialElement(s.ring, map(lambda x: x % rhs, s.coeffs))
        return q, r
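The loop in the middle branch is classic polynomial long division. As a standalone illustration of the same algorithm (a hypothetical helper, not part of ecpy), over low-to-high coefficient lists with exact Fraction arithmetic:

from fractions import Fraction

def poly_divmod(num, den):
    # Polynomial long division over coefficient lists ordered low-to-high.
    num = [Fraction(c) for c in num]
    den = [Fraction(c) for c in den]
    d, c = len(den) - 1, den[-1]           # divisor degree, leading coefficient
    q = [Fraction(0)] * max(1, len(num) - d)
    r = num[:]
    while len(r) - 1 >= d and any(r):
        shift = len(r) - 1 - d
        t = r[-1] / c                      # next quotient coefficient
        q[shift] += t
        for i, dc in enumerate(den):       # subtract t * x^shift * den
            r[shift + i] -= t * dc
        while len(r) > 1 and r[-1] == 0:   # drop the cancelled leading term
            r.pop()
    return q, r

quot, rem = poly_divmod([2, 3, 1], [1, 1])   # x^2+3x+2 divided by x+1
print([int(x) for x in quot], [int(x) for x in rem])   # [2, 1] [0], i.e. x+2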
def _mul(s, A, B):
    if len(A) == 1 and len(A[0]) == 1:
        A = A[0][0]
        return s.element_class(s, map(lambda y: map(lambda x: A * x, y), B))
    elif len(B) == 1 and len(B[0]) == 1:
        B = B[0][0]
        return s.element_class(s, map(lambda y: map(lambda x: x * B, y), A))
    deg_total_1 = max([len(X) + len(Y) - 1 for X, Y in itertools.product(A, B)])
    deg_total_2 = len(list(itertools.product(A, B)))
    ret = [[0] * deg_total_1 for _ in xrange(deg_total_2)]
    deg1 = 0
    for X in A:
        deg2 = 0
        for Y in B:
            for x, y in enumerate(X):
                for u, v in enumerate(Y):
                    ret[deg1 + deg2][x + u] += y * v
            deg2 += 1
        deg1 += 1
    return s.element_class(s, ret)
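One caveat worth flagging: on Python 3, where six.moves.map is the lazy builtin, the nested map(...) objects passed to element_class in the first two branches are single-use iterators. A quick illustration of the pitfall:

from six.moves import map

rows = [[1, 2], [3, 4]]
lazy = map(lambda row: map(lambda x: 2 * x, row), rows)
materialized = [list(row) for row in lazy]   # force evaluation exactly once
print(materialized)                          # [[2, 4], [6, 8]]
# Iterating `lazy` again would now yield nothing: map objects are exhausted
# after one pass on Python 3, so consumers that re-iterate must materialize.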
def hensel_lift(curve, P):
    """
    Calculate Lifted Point using Hensel's Lemma
    Args:
        curve: The Elliptic Curve
        P: A point on curve
    Returns:
        The "lifted" Point
    """
    from six.moves import map
    from ecpy.utils import modinv
    x, y, _ = map(int, tuple(P))
    p = curve.field.p
    t = (((x * x * x + curve.a * x + curve.b) - y * y) // p) % p
    t = (t * modinv(2 * y, p)) % p
    return list(map(int, (x, y + (curve.field.p * t))))
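The lifting formula can be checked end to end without ecpy. A minimal sketch on an illustrative curve y^2 = x^3 + 2x + 3 over GF(7) (curve and point chosen just for this example; pow(_, -1, p) needs Python 3.8+):

p, a, b = 7, 2, 3
x, y = 2, 1                                    # (2, 1) lies on the curve mod 7
t = (((x * x * x + a * x + b) - y * y) // p) % p
t = (t * pow(2 * y, -1, p)) % p                # modular inverse of 2y mod p
y_lifted = y + p * t
# The lifted point satisfies the curve equation modulo p^2, not just mod p.
assert (y_lifted * y_lifted - (x * x * x + a * x + b)) % (p * p) == 0
print(x, y_lifted)                             # 2 8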
def distortion_map(s):
    r"""
    IMPORTANT: to use this function, the definition field should be an
    Extended Finite Field.

    Returns \phi(self), where \phi is the distortion map for the
    polynomial x^2+1 or x^2+x+1.
    """
    def to_tuple(x):
        from six import integer_types
        if type(x) in integer_types:
            return (x, 0)
        return tuple(x)

    x = to_tuple(s.x)
    y = to_tuple(s.y)
    if s.group.field.t == 1:
        x = (-x[0], -x[1])
        y = (y[1], y[0])
    elif s.group.field.t == 2:
        x = (x[1], x[0])
        y = (y[0], y[1])
    return s.__class__(s.group, x, y)
def mapall(funcs, seq):
    """
    Parameters
    ----------
    funcs : iterable[function]
        Sequence of functions to map over `seq`.
    seq : iterable
        Sequence over which to map funcs.

    Yields
    ------
    elem : object
        Concatenated result of mapping each ``func`` over ``seq``.

    Examples
    --------
    >>> list(mapall([lambda x: x + 1, lambda x: x - 1], [1, 2, 3]))
    [2, 3, 4, 0, 1, 2]
    """
    for func in funcs:
        for elem in seq:
            yield func(elem)
def chrange(start, stop):
    """
    Construct an iterable of length-1 strings beginning with `start` and
    ending with `stop`.

    Parameters
    ----------
    start : str
        The first character.
    stop : str
        The last character.

    Returns
    -------
    chars : iterable[str]
        Iterable of strings beginning with start and ending with stop.

    Examples
    --------
    >>> chrange('A', 'C')
    ['A', 'B', 'C']
    """
    return list(map(chr, range(ord(start), ord(stop) + 1)))
def __init__(self,
             url='sqlite:///:memory:',
             equities=_default_equities,
             **frames):
    self._url = url
    self._eng = None
    if equities is self._default_equities:
        equities = make_simple_equity_info(
            list(map(ord, 'ABC')),
            pd.Timestamp(0),
            pd.Timestamp('2015'),
        )
    frames['equities'] = equities
    self._frames = frames
    self._eng = None  # set in enter and exit
def __init__(
        self, index_url="https://pypi.python.org/simple", hosts=('*',),
        ca_bundle=None, verify_ssl=True, *args, **kw
):
    Environment.__init__(self, *args, **kw)
    self.index_url = index_url + "/" [:not index_url.endswith('/')]
    self.scanned_urls = {}
    self.fetched_urls = {}
    self.package_pages = {}
    self.allows = re.compile('|'.join(map(translate, hosts))).match
    self.to_scan = []
    use_ssl = (
        verify_ssl
        and ssl_support.is_available
        and (ca_bundle or ssl_support.find_ca_bundle())
    )
    if use_ssl:
        self.opener = ssl_support.opener_for(ca_bundle)
    else:
        self.opener = urllib.request.urlopen
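The `index_url + "/" [:not index_url.endswith('/')]` expression is a terse slicing idiom: `not ...` evaluates to 0 or 1, so the slice yields either '' or '/'. A quick demonstration:

url = "https://pypi.python.org/simple"
print(url + "/"[:not url.endswith('/')])    # appends the missing slash
url = "https://pypi.python.org/simple/"
print(url + "/"[:not url.endswith('/')])    # already terminated; unchanged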
def polyline2coords(points):
    """
    Return row and column coordinates for a polyline.

    >>> rr, cc = polyline2coords([(0, 0), (2, 2), (2, 4)])
    >>> list(rr)
    [0, 1, 2, 2, 3, 4]
    >>> list(cc)
    [0, 1, 2, 2, 2, 2]

    :param list of tuple points: Polyline in format [(x1,y1), (x2,y2), ...]
    :return: tuple with row and column coordinates in numpy arrays
    :rtype: tuple of numpy array
    """
    coords = []
    for i in range(len(points) - 1):
        xy = list(map(int, points[i] + points[i + 1]))
        coords.append(skd.line(xy[1], xy[0], xy[3], xy[2]))
    return [np.hstack(c) for c in zip(*coords)]
def _from_word2vec_binary(fname):
    with _open(fname, 'rb') as fin:
        words = []
        header = _decode(fin.readline())
        vocab_size, layer1_size = list(map(int, header.split()))  # throws for invalid file format
        vectors = np.zeros((vocab_size, layer1_size), dtype=float32)
        binary_len = np.dtype(float32).itemsize * layer1_size
        for line_no in xrange(vocab_size):
            # mixed text and binary: read text first, then binary
            word = []
            while True:
                ch = fin.read(1)
                if ch == b' ':
                    break
                if ch != b'\n':
                    # ignore newlines in front of words (some binary files have them, some don't)
                    word.append(ch)
            word = _decode(b''.join(word))
            index = line_no
            words.append(word)
            vectors[index, :] = np.fromstring(fin.read(binary_len), dtype=float32)
        return words, vectors
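The layout this parser expects is an ASCII header "vocab_size dim\n" followed, per word, by the token, a space, and dim raw float32 values. A round-trip sketch, assuming the module's `_open` and `_decode` helpers behave like built-in `open` and UTF-8 `bytes.decode` (file name and tokens here are illustrative):

import numpy as np

with open("toy_vectors.bin", "wb") as f:
    f.write(b"2 3\n")                               # header: 2 words, 3 dims
    for word, vec in [(b"cat", [1, 2, 3]), (b"dog", [4, 5, 6])]:
        f.write(word + b" ")
        f.write(np.asarray(vec, dtype=np.float32).tobytes())

words, vectors = _from_word2vec_binary("toy_vectors.bin")
print(words)           # ['cat', 'dog']
print(vectors.shape)   # (2, 3)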
def parallel_map(fun, args):
    global POOL
    args = list(args)
    if len(args) < 2 or TREECAT_THREADS == 1:
        return list(map(fun, args))
    if POOL is None:
        POOL = multiprocessing.Pool(TREECAT_THREADS)
    return POOL.map(fun, args)
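A hypothetical usage note: because multiprocessing pickles the callable, `fun` must be a module-level function (a lambda would fail once TREECAT_THREADS > 1), and the call should be guarded on platforms that spawn workers:

def square(x):                # module-level, hence picklable by the pool
    return x * x

if __name__ == '__main__':    # guard required where workers are spawned
    print(parallel_map(square, range(8)))   # [0, 1, 4, 9, 16, 25, 36, 49]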
def log_profiling_stats():
    logger.info('-----------------------------------------------------------')
    logger.info('Series:')
    for name, series in sorted(SERIES.items()):
        logger.info('  {}: {}'.format(name, ' '.join(map(str, series))))

    logger.info('-----------------------------------------------------------')
    logger.info('Histograms:')
    for name, histogram in sorted(HISTOGRAMS.items()):
        logger.info('{: >10s} {}'.format('Count', name))
        for value, count in sorted(histogram.items()):
            logger.info('{: >10d} {}'.format(count, value))

    logger.info('-----------------------------------------------------------')
    logger.info('Counters:')
    logger.info('{: >10s} {}'.format('Count', 'Counter'))
    for name, count in sorted(COUNTERS.items()):
        logger.info('{: >10d} {}'.format(count, name))

    logger.info('-----------------------------------------------------------')
    logger.info('Timers:')
    times = [(t.elapsed, t.count, f) for (f, t) in TIMERS.items()]
    times.sort(reverse=True, key=lambda x: x[0])
    logger.info('{: >10} {: >10} {}'.format('Seconds', 'Calls', 'Function'))
    for time, count, name in times:
        logger.info('{: >10.3f} {: >10} {}'.format(time, count, name))
def _defaults_match(self, arg):
    return any(map(Argument.ignore_default, [self, arg])) \
        or (self.default is Argument.any_default and
            arg.default is not Argument.no_default) \
        or (arg.default is Argument.any_default and
            self.default is not Argument.no_default) \
        or self.default == arg.default
def __str__(self):
    missing_args = list(map(str, self.missing_args))
    return '%s is missing argument%s: %s' % (
        self.format_callable(),
        's' if len(missing_args) > 1 else '',
        ', '.join(missing_args),
    )
def dzip_exact(*dicts):
    """
    Parameters
    ----------
    *dicts : iterable[dict]
        A sequence of dicts all sharing the same keys.

    Returns
    -------
    zipped : dict
        A dict whose keys are the union of all keys in *dicts, and whose
        values are tuples of length len(dicts) containing the result of
        looking up each key in each dict.

    Raises
    ------
    ValueError
        If dicts don't all have the same keys.

    Example
    -------
    >>> result = dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4})
    >>> result == {'a': (1, 3), 'b': (2, 4)}
    True
    """
    if not same(*map(viewkeys, dicts)):
        raise ValueError(
            "dict keys not all equal:\n\n%s" % _format_unequal_keys(dicts)
        )
    return {k: tuple(d[k] for d in dicts) for k in dicts[0]}
def __init__(self, **frames):
    self._eng = None
    if not frames:
        frames = {
            'equities': make_simple_equity_info(
                list(map(ord, 'ABC')),
                pd.Timestamp(0),
                pd.Timestamp('2015'),
            ),
        }
    self._data = AssetDBWriterFromDataFrame(**frames)