The following 50 code examples, extracted from open source Python projects, illustrate how to use builtins.map().
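Before the project examples, here is a minimal illustrative sketch (not taken from any of the projects below; the helper name double_all is hypothetical) of the behaviour most of these examples rely on: in Python 3, map() returns a lazy iterator, so callers usually wrap it in list() when they need a concrete sequence.

# Minimal sketch (assumed, not from the projects below): map() is lazy in Python 3.
from builtins import map  # stdlib module on Python 3; the "future" package provides it on Python 2


def double_all(values):
    # map() yields results lazily; wrap it in list() to materialize them
    return list(map(lambda v: 2 * v, values))


print(double_all([1, 2, 3]))  # [2, 4, 6]
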
def get_broadcast_funcs(**kwargs):
    kw = Objectify(kwargs, conf={})
    pieces = kw.conf[kw.extract] if kw.extract else kw.conf
    no_conf = remove_keys(kwargs, 'conf')
    noop = partial(cast, _type='none')

    if kw.listize:
        listed = listize(pieces)
        piece_defs = map(DotDict, listed) if kw.pdictize else listed
        parser = partial(parse_conf, **no_conf)
        pfuncs = [partial(parser, conf=conf) for conf in piece_defs]
        get_pieces = lambda item: broadcast(item, *pfuncs)
    elif kw.ptype != 'none':
        conf = DotDict(pieces) if kw.pdictize and pieces else pieces
        get_pieces = partial(parse_conf, conf=conf, **no_conf)
    else:
        get_pieces = noop

    ffunc = noop if kw.ftype == 'none' else partial(get_field, **kwargs)
    return (ffunc, get_pieces)

def protobuf_scalar_to_python(val):
    assert isinstance(val, ops_pb.Scalar)
    scalar_key = val.WhichOneof('value')
    if scalar_key == 'uuid_val':
        raise ValueError("During deserialization, no attributes should reference UUIDs.")
    elif scalar_key == 'map_val':
        return pb_to_dict(val.map_val.map)
    elif scalar_key == 'null_val':
        return None
    elif scalar_key == 'slice_val':
        val = val.slice_val
        return slice(val.start.value if val.HasField('start') else None,
                     val.stop.value if val.HasField('stop') else None,
                     val.step.value if val.HasField('step') else None)
    elif scalar_key == 'dtype_val':
        return pb_to_dtype(val.dtype_val)
    elif scalar_key == 'axis':
        return pb_to_axis(val.axis)
    elif scalar_key == 'axes_map':
        return pb_to_axes_map(val.axes_map)
    return getattr(val, scalar_key)

def protobuf_attr_to_python(val):
    if val.HasField('scalar'):
        return protobuf_scalar_to_python(val.scalar)
    if val.HasField('tensor'):
        return pb_to_tensor(val.tensor)
    elif val.HasField('repeated_scalar'):
        if len(val.repeated_scalar.val) == 1 and \
                val.repeated_scalar.val[0].string_val == '_ngraph_iter_sentinel_':
            return ()
        else:
            return list(map(protobuf_scalar_to_python, val.repeated_scalar.val))
    elif val.HasField('axes'):
        return protobuf_to_axes(val.axes)
    else:
        raise ValueError("Cannot convert {} to python attribute value".format(val))

def quantize(self, line, no_features):
    if " " not in line:
        classes, sparse = line.strip(), ""
    elif line.startswith(' '):
        classes, sparse = '', line.strip()
    else:
        classes, sparse = line.strip().split(None, 1)

    if classes:
        y = list(map(int, classes.split(',')))
    else:
        y = []

    if no_features:
        return y

    c, d = [], []
    for v in sparse.split():
        loc, v = v.split(":")
        c.append(int(loc))
        d.append(float(v))

    return (c, d), y

def stream(self, fn, no_features=False):
    with open(fn, 'rt') as f:
        n_samples, n_feats, n_classes = list(map(int, f.readline().split()))
        for i, line in enumerate(f):
            if i == 0:
                continue
            if self.verbose and i % 10000 == 0:
                print("%s docs encoded" % i)

            res = self.quantize(line, no_features)
            if no_features:
                yield {"labels": res}, res
            else:
                (c, d), y = res
                yield {"labels": y}, sp.csr_matrix((d, ([0] * len(d), c)),
                                                   shape=(1, n_feats),
                                                   dtype='float32'), y

def encode_path(path):
    """Encode paths for use as file names the same way systemd does:

    "Some unit names reflect paths existing in the file system namespace.
    Example: a device unit dev-sda.device refers to a device with the
    device node /dev/sda in the file system namespace. If this applies, a
    special way to escape the path name is used, so that the result is
    usable as part of a filename. Basically, given a path, "/" is replaced
    by "-", and all other characters which are not ASCII alphanumerics are
    replaced by C-style "\x2d" escapes (except that "_" is never replaced
    and "." is only replaced when it would be the first character in the
    escaped path). The root directory "/" is encoded as single dash, while
    otherwise the initial and ending "/" are removed from all paths during
    transformation. This escaping is reversible."
    """
    # strip any initial/ending '/'
    name = path.strip('/') if len(path) > 1 else path
    name = ''.join(map(encode_char, name))
    if name[0] == '.':
        name = '\\x{:x}'.format(ord('.')) + name[1:]
    return name

def songs_detail(self, ids, offset=0):
    tmpids = ids[offset:]
    tmpids = tmpids[0:100]
    tmpids = list(map(str, tmpids))
    action = 'http://music.163.com/api/song/detail?ids=[{}]'.format(  # NOQA
        ','.join(tmpids))
    try:
        data = self.httpRequest('GET', action)
        # the order of data['songs'] is no longer the same as tmpids,
        # so just make the order back
        data['songs'].sort(key=lambda song: tmpids.index(str(song['id'])))
        return data['songs']
    except requests.exceptions.RequestException as e:
        log.error(e)
        return []

def get_folded_config_idxs(states):
    arr = states.config_array
    # move the missing allele in between alleles 0,1
    arr = arr[:, (0, -1, 1), :][:, :, (0, -1, 1)]
    # relabel alleles 0,1 (4 ways to do this)
    symm_arrs = [arr, arr[:, ::-1, :], arr[:, :, ::-1], arr[:, ::-1, ::-1]]
    # swap the 2 loci
    symm_arrs += [numpy.transpose(a, axes=(0, 2, 1)) for a in symm_arrs]
    # swap back allele 1 with missing allele
    symm_arrs = [a[:, (0, -1, 1), :][:, :, (0, -1, 1)] for a in symm_arrs]
    # get hash val for each (folded) config
    hash_vals = numpy.vstack(list(map(states.hash_config_array, symm_arrs)))
    # get the smallest hash val among all the folds
    hash_vals = numpy.amin(hash_vals, axis=0)
    assert len(hash_vals) == arr.shape[0]
    # return the corresponding indices
    return [states.hash_to_allIdx[h] for h in hash_vals]

def computeLikelihoods(n, exact, popSizes, theta, timeLens, rhoGrid, cores):
    rhoGrid = list(rhoGrid)
    assert rhoGrid == sorted(rhoGrid)

    # make the pool first to avoid copying large objects.
    # maxtasksperchild=1 to avoid memory issues
    executor = Pool(cores, maxtasksperchild=1)

    # make the states and the rates
    states = get_states(n, exact)
    moranRates = MoranRates(states)

    # compute initial distributions and likelihoods
    prevInit = states.getUnlinkedStationary(popSize=popSizes[-1], theta=theta)
    inits = []
    #for rho, rates in reversed(zip(rhoGrid, lastRatesList)):
    for rho in reversed(rhoGrid):
        rates = moranRates.getRates(rho=rho, popSize=popSizes[-1], theta=theta)
        prevInit = stationary(Q=rates, init=prevInit,
                              norm_order=float('inf'), epsilon=1e-2)
        inits.append(prevInit)

    ret = executor.map(getColumnHelper,
                       [(moranRates, rho, theta, popSizes, timeLens, prevInit)
                        for rho, prevInit in zip(reversed(rhoGrid), inits)])
    logging.info("Cleaning up results...")
    ret = [states.ordered_log_likelihoods(result) for result in ret]
    executor.close()
    return [(rho, lik) for rho, lik in zip(rhoGrid, reversed(ret))]

def testBoundingSpherePrecision(self):
    x = 533
    y = 383
    z = 9
    geodetic = GlobalGeodetic(True)
    [minx, miny, maxx, maxy] = geodetic.TileBounds(x, y, z)
    ter = TerrainTile(west=minx, south=miny, east=maxx, north=maxy)
    ter.fromFile('tests/data/%s_%s_%s.terrain' % (z, x, y))
    llh2ecef = lambda x: LLH2ECEF(x[0], x[1], x[2])
    coords = ter.getVerticesCoordinates()
    coords = list(map(llh2ecef, coords))
    sphere = BoundingSphere()
    sphere.fromPoints(coords)
    for coord in coords:
        distance = c3d.distance(sphere.center, coord)
        self.assertLessEqual(distance, sphere.radius)

def get_addresses_from_input_file(input_file_name):
    """Read addresses from input file into list of tuples.
    This only supports address and zipcode headers
    """
    mode = 'r'
    if sys.version_info[0] < 3:
        mode = 'rb'
    with io.open(input_file_name, mode) as input_file:
        reader = csv.reader(input_file, delimiter=',', quotechar='"')
        addresses = list(map(tuple, reader))
        if len(addresses) == 0:
            raise Exception('No addresses found in input file')
        header_columns = list(column.lower() for column in addresses.pop(0))
        try:
            address_index = header_columns.index('address')
            zipcode_index = header_columns.index('zipcode')
        except ValueError:
            raise Exception("""The first row of the input CSV must be a header that contains \
a column labeled 'address' and a column labeled 'zipcode'.""")
        return list((row[address_index], row[zipcode_index]) for row in addresses)

def palette(self, alpha='natural'):
    """Returns a palette that is a sequence of 3-tuples or 4-tuples,
    synthesizing it from the ``PLTE`` and ``tRNS`` chunks.  These chunks
    should have already been processed (for example, by calling the
    :meth:`preamble` method).  All the tuples are the same size:
    3-tuples if there is no ``tRNS`` chunk, 4-tuples when there is a
    ``tRNS`` chunk.  Assumes that the image is colour type 3 and
    therefore a ``PLTE`` chunk is required.

    If the `alpha` argument is ``'force'`` then an alpha channel is
    always added, forcing the result to be a sequence of 4-tuples.
    """
    if not self.plte:
        raise FormatError(
            "Required PLTE chunk is missing in colour type 3 image.")
    plte = group(array('B', self.plte), 3)
    if self.trns or alpha == 'force':
        trns = array('B', self.trns or '')
        trns.extend([255]*(len(plte)-len(trns)))
        plte = list(map(operator.add, plte, group(trns, 1)))
    return plte

async def zip(*sources):
    """Combine and forward the elements of several asynchronous sequences.

    Each generated value is a tuple of elements, using the same order as
    their respective sources. The generation continues until the shortest
    sequence is exhausted.

    Note: the different sequences are awaited in parallel, so that their
    waiting times don't add up.
    """
    async with AsyncExitStack() as stack:
        # Handle resources
        streamers = [await stack.enter_context(streamcontext(source))
                     for source in sources]
        # Loop over items
        while True:
            try:
                coros = builtins.map(anext, streamers)
                items = await asyncio.gather(*coros)
            except StopAsyncIteration:
                break
            else:
                yield tuple(items)

async def map(source, func, *more_sources):
    """Apply a given function to the elements of one or several
    asynchronous sequences.

    Each element is used as a positional argument, using the same order as
    their respective sources. The generation continues until the shortest
    sequence is exhausted. The function can either be synchronous or
    asynchronous.

    Note: the different sequences are awaited in parallel, so that their
    waiting times don't add up.
    """
    iscorofunc = asyncio.iscoroutinefunction(func)
    if more_sources:
        source = zip(source, *more_sources)
    async with streamcontext(source) as streamer:
        async for item in streamer:
            if not more_sources:
                item = (item,)
            result = func(*item)
            if iscorofunc:
                result = await result
            yield result

def parse_array(str_array, parsefunc=None, try_return_scalar=True):
    '''splits str_array into elements, and apply func on each element

    :param str_array: a valid string array, with or without square brackets.
        Leading and trailing spaces will be ignored (str split is applied
        twice if the string has square brackets). The separation characters
        are the comma surrounded by zero or more spaces, or a one or more
        spaces. E.g. " [1 ,3 ]", "[1,3]"
    '''
    # str_array should always be a string... just in case it's already a parsable value
    # (e.g., parsefunc = float and str-array = 5.6), then try to parse it first:
    if parsefunc is not None and try_return_scalar:
        try:
            return parsefunc(str_array)
        except Exception:
            pass
    d = str_array.strip()
    if d[0] == '[' and d[-1] == ']':
        d = d[1:-1].strip()
    _ = re.split("(?:\\s*,\\s*|\\s+)", d)
    if parsefunc is not None:
        _ = list(map(parsefunc, _))
    return _[0] if try_return_scalar and len(_) == 1 else _

def close(self):
    def action(data):
        what = data["what"]
        data.pop("what", None)
        if what == "insert":
            index = self.find_closet_index(data["name"])
            # avoid duplication
            if self.db_names[index-1] != data["name"]:
                self.db.insert(index, data)
        else:
            index = self.get_index(data["name"])
            if index != -1:
                self.db[index]["abbrev"] = data["abbrev"]

    if len(self.updates_db) > 0:
        list(map(action, self.updates_db))
        if not config_file_exists:
            os.makedirs(os.path.dirname(config_file))
            self.db_file = config_file
        with open(self.db_file, "w") as file_data:
            json.dump(self.db, file_data)
        print("Database was updated")

def assign_scalar(message, value):
    """
    Adds the appropriate scalar type of value to the protobuf message
    """
    if value is None:
        message.null_val = True
    elif isinstance(value, np.generic):
        assign_scalar(message, np.asscalar(value))
    elif isinstance(value, (str, six.text_type)):
        message.string_val = value
    elif isinstance(value, np.dtype):
        message.dtype_val = dtype_to_protobuf(value)
    elif isinstance(value, float):
        message.double_val = value
    elif isinstance(value, bool):
        message.bool_val = value
    elif isinstance(value, six.integer_types):
        message.int_val = value
    elif isinstance(value, slice):
        slice_val = ops_pb.Slice()
        if value.start is not None:
            slice_val.start.value = value.start
        if value.step is not None:
            slice_val.step.value = value.step
        if value.stop is not None:
            slice_val.stop.value = value.stop
        message.slice_val.CopyFrom(slice_val)
    elif isinstance(value, dict):
        for key in value:
            assign_scalar(message.map_val.map[key], value[key])
        # This encodes an empty dict for deserialization
        assign_scalar(message.map_val.map['_ngraph_map_sentinel_'], '')
    elif isinstance(value, Axis):
        message.axis.CopyFrom(axis_to_protobuf(value))
    elif isinstance(value, AxesMap):
        message.axes_map.CopyFrom(axes_map_to_protobuf(value))
    else:
        raise unhandled_scalar_value(value)

def protobuf_to_axes(msg):
    if msg.uuid.uuid in GLOBAL_AXIS_REGISTRY:
        return GLOBAL_AXIS_REGISTRY[msg.uuid.uuid]

    axes = ngraph.make_axes(list(map(pb_to_axis, msg.axes)))
    axes.uuid = uuid.UUID(bytes=msg.uuid.uuid)
    axes.name = msg.name
    GLOBAL_AXIS_REGISTRY[msg.uuid.uuid] = axes
    return axes

def __repr__(self):
    return 'Axes({})'.format(
        ', '.join(map(repr, self))
    )

def __str__(self):
    return ', '.join(map(str, self))

def _map_axis(self, old_axis):
    """
    Given a map from {old_axes_name: new_axes_name} and an old_axis map the
    old_axis into the new_axes.
    """
    if old_axis.name in self:
        return make_axis(old_axis.length, self[old_axis.name])
    else:
        return old_axis

def _duplicate_axis_names(self):
    """
    Returns:
        a dictionary mapping to duplicate target names and the source names
        that map to it: {target: set([source, ...])}
    """
    # invert axes_map to see if there are any target axis names that are
    # duplicated
    counts = collections.defaultdict(set)
    for key, value in self.items():
        counts[value].add(key)

    # filter counts to include only duplicate axis
    return {x: y for x, y in counts.items() if len(y) > 1}

def loadClasses(dataset):
    # Load reverse map
    with open(dataset.classes, 'rt') as f:
        data = json.load(f)

    return {v: k for k, v in data}

def _cmap_to_rgb(mplmap, values):
    from matplotlib import cm

    cmap = getattr(cm, mplmap)
    mx = values.max()
    mn = values.min()
    cat_values = (values-mn)/(mx-mn)  # rescale values [0.0,1.0]
    rgba = cmap(cat_values)  # array of RGBA values in range [0.0, 1.0]

    # strip alpha field and rescale to [0,255] RGB integers
    rgb = [list(map(int, c[:3]*256.0)) for c in rgba]
    return rgb

def parse_properties(self, title):
    """
    Parse the 'title' attribute of an element.
    """
    ret = {}
    # if it's an lxml node, take the 'title' attribute or die trying
    if hasattr(title, 'attrib'):
        title = title.attrib['title']
    # Split on semicolon, optionally preceded and followed by whitespace
    for kv in re.split(r'\s*;\s*', title):
        # Split key and value at first whitespace
        (k, v) = re.split(r'\s+', kv, 1)
        # Make sure the property is from the list of known properties
        try:
            prop_spec = getattr(HocrSpecProperties, k)
            # If the property is a list value, split the value at the
            # property's 'split_pattern' and apply the type to its values
            if prop_spec.list:
                if 1 == len(prop_spec.split_pattern):
                    v = list(map(prop_spec.type,
                                 re.split(prop_spec.split_pattern[0], v)))
                elif 2 == len(prop_spec.split_pattern):
                    # lambda vv: map(prop_spec.type, re.split(prop_spec.split_pattern[1], vv)),
                    v = [list(map(prop_spec.type,
                                  re.split(prop_spec.split_pattern[1], vv)))
                         for vv in re.split(prop_spec.split_pattern[0], v)]
            # If the property is a scalar value, apply the type to the value
            else:
                v = prop_spec.type(v)
        except Exception as e:
            raise type(e)(str(e) + ' (%s on "%s")' % (type(e).__name__, k))
        ret[k] = v
    return ret

def lomb_scargle_direct_sums(t, yw, w, freqs, YY, nharms=1, **kwargs):
    """
    Compute Lomb-Scargle periodogram using direct summations. This is
    usually only useful for debugging and/or small numbers of frequencies.

    Parameters
    ----------
    t: array_like
        Observation times.
    yw: array_like
        Observations multiplied by their corresponding weights. `sum(yw)`
        is the weighted mean.
    w: array_like
        Weights for each of the observations. Usually proportional to
        `1/sigma ** 2` where `sigma` is the observation uncertainties.
        Normalized so that `sum(w) = 1`.
    freqs: array_like
        Trial frequencies to evaluate the periodogram
    YY: float
        Weighted variance of the observations.
    nharms: int, optional (default: 1)
        Number of harmonics to use in the model. Lomb Scargle only uses 1,
        but more harmonics allow for greater model flexibility at the cost
        of higher complexity and therefore reduced signal-to-noise.

    Returns
    -------
    power: array_like
        The periodogram powers at each of the trial frequencies
    """
    def sfunc(f):
        return mhdirect_sums(t, yw, w, f, YY, nharms=nharms)

    sums = [add_regularization(s, **kwargs) for s in list(map(sfunc, freqs))]
    ybar = sum(yw)
    return np.array([mhgls_from_sums(s, YY, ybar) for s in sums])

def map(*args, **kwargs):
    return list(builtins.map(*args, **kwargs))

def _update_current_reader_sources(self):
    own_layers = self._get_all_own_layers()
    if own_layers:
        self._current_reader_sources = list(map(lambda l: l.source(), own_layers))
    else:
        self._current_reader_sources = None

def _set_qgis_extent(self, zoom, scheme, bounds):
    """
    * Sets the current extent of the QGIS map canvas to the specified bounds
    :return:
    """
    min_xy = tile_to_latlon(zoom, bounds["x_min"], bounds["y_min"], scheme=scheme)
    max_xy = tile_to_latlon(zoom, bounds["x_max"], bounds["y_max"], scheme=scheme)
    min_pos = convert_coordinate(900913, self._get_qgis_crs(), lat=min_xy[1], lng=min_xy[0])
    max_pos = convert_coordinate(900913, self._get_qgis_crs(), lat=min_xy[1], lng=max_xy[0])
    map_min_pos = QgsPoint(min_pos[0], min_pos[1])
    map_max_pos = QgsPoint(max_pos[0], max_pos[1])
    rect = QgsRectangle(map_min_pos, map_max_pos)
    self.iface.mapCanvas().setExtent(rect)
    self.iface.mapCanvas().refresh()

def apply_map(fn, x):
    return list(map(fn, x))

def _sum_tiles(first_tile, second_tile):
    return tuple(map(operator.add, first_tile, second_tile))

def recommend_playlist(self):
    try:
        action = 'http://music.163.com/weapi/v1/discovery/recommend/songs?csrf_token='  # NOQA
        self.session.cookies.load()
        csrf = ''
        for cookie in self.session.cookies:
            if cookie.name == '__csrf':
                csrf = cookie.value
        if csrf == '':
            return False
        action += csrf
        req = {'offset': 0, 'total': True, 'limit': 20, 'csrf_token': csrf}
        page = self.session.post(action,
                                 data=encrypted_request(req),
                                 headers=self.header,
                                 timeout=default_timeout)
        results = json.loads(page.text)['recommend']
        song_ids = []
        for result in results:
            song_ids.append(result['id'])
        data = map(self.song_detail, song_ids)
        return [d[0] for d in data]
    except (requests.exceptions.RequestException, ValueError) as e:
        log.error(e)
        return False

# personal FM

def lmap(*args, **kwargs):
    return list(map(*args, **kwargs))

def __repr__(self):
    if not self:
        return '%s()' % self.__class__.__name__
    items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
    return '%s({%s})' % (self.__class__.__name__, items)

# Multiset-style mathematical operations discussed in:
#       Knuth TAOCP Volume II section 4.6.3 exercise 19
#       and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
#       c += Counter()

def test_import_builtins(self):
    before = """
    a = raw_input()
    b = open(a, b, c)
    c = filter(a, b)
    d = map(a, b)
    e = isinstance(a, str)
    f = bytes(a, encoding='utf-8')
    for g in xrange(10**10):
        pass
    h = reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])
    super(MyClass, self)
    """
    after = """
    from builtins import bytes
    from builtins import filter
    from builtins import input
    from builtins import map
    from builtins import range
    from functools import reduce
    a = input()
    b = open(a, b, c)
    c = list(filter(a, b))
    d = list(map(a, b))
    e = isinstance(a, str)
    f = bytes(a, encoding='utf-8')
    for g in range(10**10):
        pass
    h = reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])
    super(MyClass, self)
    """
    self.convert_check(before, after, ignore_imports=False, run=False)

def _leveldb_write(db, *args, **kwargs):
    if kwargs:
        raise NotImplementedError('passing keyword arguments to '
                                  'LevelDB not supported')
    db.put(*list(map(native, args)))

def _leveldb_delete(db, *args, **kwargs):
    if kwargs:
        raise NotImplementedError('passing keyword arguments to '
                                  'LevelDB not supported')
    db.delete(*list(map(native, args)))

def _leveldb_reader(db, *args, **kwargs):
    if kwargs:
        raise NotImplementedError('passing keyword arguments to '
                                  'LevelDB not supported')
    return db.get(*list(map(native, args)))

def _redis_keys(db, projexpr):
    return list(map(projexpr, db.keys()))

# values

def _redis_values(db, projexpr):
    return list(map(projexpr, db.mget(list(db.keys()))))

# items

def __str__(self):
    ret = []
    ret += [[self.n, self.table.shape[0]]]
    ret += [[1, self.theta]]
    ret += [rhos_to_string(self.table.columns).split()]
    ret += [[], []]
    for i, (config, row) in enumerate(self.table.iterrows(), start=1):
        ret += [[i, "#", config, ":"] + list(row)]
    return "\n".join([" ".join(map(str, x)) for x in ret])

def rhos_to_string(rhos):
    rhos = numpy.array(rhos)
    if rhos[0] == 0 and numpy.allclose(rhos[1:] - rhos[:-1],
                                       rhos[-1] / float(len(rhos)-1),
                                       atol=0):
        rho_line = [len(rhos), rhos[-1]]
    else:
        #ret += [["rho"] + list(rhos)]
        rho_line = []
        prev_rho, prev_diff = rhos[0], float('inf')
        for rho in rhos[1:]:
            if not numpy.isclose(rho - prev_rho, prev_diff, atol=0):
                prev_diff = rho - prev_rho
                rho_line += [prev_rho, prev_diff]
            prev_rho = rho
        rho_line += [prev_rho]
    return " ".join(map(str, rho_line))

def fromPoints(points, boundingSphere):
    if len(points) < 1:
        raise Exception('Your list of points must contain at least 2 points')

    # Bring coordinates to ellipsoid scaled coordinates
    scaleDown = lambda coord: [coord[0] * rX, coord[1] * rY, coord[2] * rZ]
    scaledPoints = list(map(scaleDown, points))
    scaledSphereCenter = scaleDown(boundingSphere.center)
    magnitude = lambda coord: computeMagnitude(coord, scaledSphereCenter)
    magnitudes = list(map(magnitude, scaledPoints))
    return c3d.multiplyByScalar(scaledSphereCenter, max(magnitudes))

def __init__(self, *args, **kwargs):
    MAX = float('infinity')
    MIN = float('-infinity')
    self.center = list(map(float, kwargs.get('center', [])))
    self.radius = float(kwargs.get('radius', 0))
    self.minPointX = [MAX, MAX, MAX]
    self.minPointY = [MAX, MAX, MAX]
    self.minPointZ = [MAX, MAX, MAX]
    self.maxPointX = [MIN, MIN, MIN]
    self.maxPointY = [MIN, MIN, MIN]
    self.maxPointZ = [MIN, MIN, MIN]

# Based on Ritter's algorithm

def iterboxed(self, rows):
    """Iterator that yields each scanline in boxed row flat pixel format.
    `rows` should be an iterator that yields the bytes of each row in turn.
    """
    def asvalues(raw):
        """Convert a row of raw bytes into a flat row.  Result will
        be a freshly allocated object, not shared with argument.
        """
        if self.bitdepth == 8:
            return array('B', raw)
        if self.bitdepth == 16:
            raw = tostring(raw)
            return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
        assert self.bitdepth < 8
        width = self.width
        # Samples per byte
        spb = 8//self.bitdepth
        out = array('B')
        mask = 2**self.bitdepth - 1
        shifts = list(map(self.bitdepth.__mul__, reversed(list(range(spb)))))
        for o in raw:
            out.extend([mask&(o>>i) for i in shifts])
        return out[:width]

    return map(asvalues, rows)