The following 50 code examples, extracted from open-source Python projects, illustrate how to use operator.add().
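operator.add(a, b) simply returns a + b. Because it is an ordinary function rather than syntax, it can be passed anywhere a binary callable is expected, such as map(), functools.reduce(), or Spark's reduceByKey(). A minimal refresher before the examples (the values below are illustrative only):

import operator
from functools import reduce

print(operator.add(2, 3))                          # 5, same as 2 + 3
print(reduce(operator.add, [1, 2, 3, 4]))          # 10, running sum via reduce
print(list(map(operator.add, (1, 2), (10, 20))))   # [11, 22], element-wise addition
print(operator.add([1], [2]))                      # [1, 2], works for any type supporting +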
def palette(self, alpha='natural'):
    """Returns a palette that is a sequence of 3-tuples or 4-tuples,
    synthesizing it from the ``PLTE`` and ``tRNS`` chunks.  These
    chunks should have already been processed (for example, by
    calling the :meth:`preamble` method).  All the tuples are the
    same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
    there is a ``tRNS`` chunk.  Assumes that the image is colour type
    3 and therefore a ``PLTE`` chunk is required.

    If the `alpha` argument is ``'force'`` then an alpha channel is
    always added, forcing the result to be a sequence of 4-tuples.
    """

    if not self.plte:
        raise FormatError(
            "Required PLTE chunk is missing in colour type 3 image.")
    plte = group(array('B', self.plte), 3)
    if self.trns or alpha == 'force':
        trns = array('B', self.trns or [])
        trns.extend([255] * (len(plte) - len(trns)))
        plte = list(map(operator.add, plte, group(trns, 1)))
    return plte
def exec(self, proc: Processor):
    self.proc = proc
    first_bits = self.proc.memory.REGISTERS[self.register].values
    if not self.literal:
        # look up the register binary
        second_bits = self.proc.memory.REGISTERS[self.argument].values
    else:
        # retrieve the converted literal as binary
        second_bits = self.argument.values
    result = self.operator(first_bits, second_bits)
    # add the carry if needed
    if self.operator is ripple_add_c or self.operator is ripple_sub_c:
        self.zero_bits[len(self.zero_bits) - 1] = self.proc.external.carry
        result = ripple_add(result, self.zero_bits)
    self.proc.memory.REGISTERS[self.register].values = result
    # increment pc
    self.proc.manager.next()

# 67% slower than ArithmeticOperation
def palette(self, alpha='natural'):
    """Returns a palette that is a sequence of 3-tuples or 4-tuples,
    synthesizing it from the ``PLTE`` and ``tRNS`` chunks.  These
    chunks should have already been processed (for example, by
    calling the :meth:`preamble` method).  All the tuples are the
    same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
    there is a ``tRNS`` chunk.  Assumes that the image is colour type
    3 and therefore a ``PLTE`` chunk is required.

    If the `alpha` argument is ``'force'`` then an alpha channel is
    always added, forcing the result to be a sequence of 4-tuples.
    """

    if not self.plte:
        raise FormatError(
            "Required PLTE chunk is missing in colour type 3 image.")
    plte = group(array('B', self.plte), 3)
    if self.trns or alpha == 'force':
        trns = array('B', self.trns or '')
        trns.extend([255] * (len(plte) - len(trns)))
        plte = map(operator.add, plte, group(trns, 1))
    return plte
def with_statement_hint(self, text, dialect_name='*'):
    """add a statement hint to this :class:`.Select`.

    This method is similar to :meth:`.Select.with_hint` except that
    it does not require an individual table, and instead applies to
    the statement as a whole.

    Hints here are specific to the backend database and may include
    directives such as isolation levels, file directives, fetch
    directives, etc.

    .. versionadded:: 1.0.0

    .. seealso::

        :meth:`.Select.with_hint`
    """
    return self.with_hint(None, text, dialect_name)
def palette(self, alpha='natural'):
    """Returns a palette that is a sequence of 3-tuples or 4-tuples,
    synthesizing it from the ``PLTE`` and ``tRNS`` chunks.  These
    chunks should have already been processed (for example, by
    calling the :meth:`preamble` method).  All the tuples are the
    same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
    there is a ``tRNS`` chunk.  Assumes that the image is colour type
    3 and therefore a ``PLTE`` chunk is required.

    If the `alpha` argument is ``'force'`` then an alpha channel is
    always added, forcing the result to be a sequence of 4-tuples.
    """

    if not self.plte:
        raise FormatError(
            "Required PLTE chunk is missing in colour type 3 image.")
    plte = group(array('B', self.plte), 3)
    if self.trns or alpha == 'force':
        trns = array('B', self.trns or '')
        trns.extend([255] * (len(plte) - len(trns)))
        plte = list(map(operator.add, plte, group(trns, 1)))
    return plte
def test_iterator_usage(self):
    class SequenceClass:
        def __init__(self, n):
            self.n = n

        def __getitem__(self, i):
            if 0 <= i < self.n:
                return i
            else:
                raise IndexError

    from operator import add
    self.assertEqual(self.func(add, SequenceClass(5)), 10)
    self.assertEqual(self.func(add, SequenceClass(5), 42), 52)
    self.assertRaises(TypeError, self.func, add, SequenceClass(0))
    self.assertEqual(self.func(add, SequenceClass(0), 42), 42)
    self.assertEqual(self.func(add, SequenceClass(1)), 0)
    self.assertEqual(self.func(add, SequenceClass(1), 42), 42)

    d = {"one": 1, "two": 2, "three": 3}
    self.assertEqual(self.func(add, d), "".join(d.keys()))
def _test_quantity_add_sub(self, unit, func):
    x = self.Q_(unit, 'centimeter')
    y = self.Q_(unit, 'inch')
    z = self.Q_(unit, 'second')
    a = self.Q_(unit, None)

    func(op.add, x, x, self.Q_(unit + unit, 'centimeter'))
    func(op.add, x, y, self.Q_(unit + 2.54 * unit, 'centimeter'))
    func(op.add, y, x, self.Q_(unit + unit / (2.54 * unit), 'inch'))
    func(op.add, a, unit, self.Q_(unit + unit, None))
    self.assertRaises(DimensionalityError, op.add, 10, x)
    self.assertRaises(DimensionalityError, op.add, x, 10)
    self.assertRaises(DimensionalityError, op.add, x, z)

    func(op.sub, x, x, self.Q_(unit - unit, 'centimeter'))
    func(op.sub, x, y, self.Q_(unit - 2.54 * unit, 'centimeter'))
    func(op.sub, y, x, self.Q_(unit - unit / (2.54 * unit), 'inch'))
    func(op.sub, a, unit, self.Q_(unit - unit, None))
    self.assertRaises(DimensionalityError, op.sub, 10, x)
    self.assertRaises(DimensionalityError, op.sub, x, 10)
    self.assertRaises(DimensionalityError, op.sub, x, z)
def exc_iter(*args):
    """
    Iterate over Cartesian product of *args, and if an exception is raised,
    add information of the current iterate.
    """

    value = [None]

    def iterate():
        for v in itertools.product(*args):
            value[0] = v
            yield v

    try:
        yield iterate()
    except:
        import traceback
        msg = "At: %r\n%s" % (repr(value[0]), traceback.format_exc())
        raise AssertionError(msg)
def test_safe_binop():
    # Test checked arithmetic routines

    ops = [
        (operator.add, 1),
        (operator.sub, 2),
        (operator.mul, 3)
    ]

    with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
        for xop, a, b in it:
            pyop, op = xop
            c = pyop(a, b)

            if not (INT64_MIN <= c <= INT64_MAX):
                assert_raises(OverflowError, mt.extint_safe_binop, a, b, op)
            else:
                d = mt.extint_safe_binop(a, b, op)
                if c != d:
                    # assert_equal is slow
                    assert_equal(d, c)
def check_add(Poly):
    # This checks commutation, not numerical correctness
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = p1 + p2
    assert_poly_almost_equal(p2 + p1, p3)
    assert_poly_almost_equal(p1 + c2, p3)
    assert_poly_almost_equal(c2 + p1, p3)
    assert_poly_almost_equal(p1 + tuple(c2), p3)
    assert_poly_almost_equal(tuple(c2) + p1, p3)
    assert_poly_almost_equal(p1 + np.array(c2), p3)
    assert_poly_almost_equal(np.array(c2) + p1, p3)
    assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1))
    if Poly is Polynomial:
        assert_raises(TypeError, op.add, p1, Chebyshev([0]))
    else:
        assert_raises(TypeError, op.add, p1, Polynomial([0]))
def callback(self, player):
    parent = self.parent
    if not parent:
        return
    shared = parent.shared_trigger_objects[self.type]
    status = False
    if not player.disconnected and player.world_object:
        x1, y1, z1 = parent.x + 0.5, parent.y + 0.5, parent.z + 0.5
        x2, y2, z2 = player.world_object.position.get()
        status = collision_3d(x1, y1, z1, x2, y2, z2, self.radius)
    if status:
        shared.add(player)
    else:
        shared.discard(player)
    status = bool(shared)
    if self.status != status:
        self.status = status
        if self.parent:
            parent.trigger_check()
def on_exit(self, protocol, player):
    button = self.button
    if not button:
        return S_COMMAND_CANCEL.format(command=self.name)
    if self.action in PLATFORM_ACTION_FUNCTIONS:
        if not self.platform:
            return S_COMMAND_CANCEL.format(command=self.name)
        new_action = PlatformAction(protocol, self.platform.id, self.action,
                                    self.kwargs)
    elif self.action in PLAYER_ACTION_FUNCTIONS:
        new_action = PlayerAction(protocol, self.action, self.kwargs)
    if not self.add:
        button.actions = []
    button.actions.append(new_action)
    return S_ACTION_ADDED.format(action=self.action, label=button.label)
def on_exit(self, protocol, player):
    button = self.button
    if not button:
        return S_COMMAND_CANCEL.format(command=self.name)
    if self.trigger == 'press':
        new_trigger = PressTrigger(protocol)
    elif self.trigger == 'distance':
        new_trigger = DistanceTrigger(protocol, self.radius)
    elif self.trigger == 'track':
        new_trigger = TrackTrigger(protocol, self.radius)
    elif self.trigger == 'height':
        if not self.platform:
            return S_COMMAND_CANCEL.format(command=self.name)
        new_trigger = HeightTrigger(protocol, self.platform.id, self.height)
    new_trigger.negate = self.negate
    if not self.add:
        button.clear_triggers()
    button.add_trigger(new_trigger)
    return S_TRIGGER_ADDED.format(trigger=self.trigger, label=button.label)
def countByValueAndWindow(self, windowDuration, slideDuration, numPartitions=None):
    """
    Return a new DStream in which each RDD contains the count of distinct
    elements in RDDs in a sliding window over this DStream.

    @param windowDuration: width of the window; must be a multiple of this
                           DStream's batching interval
    @param slideDuration:  sliding interval of the window (i.e., the interval
                           after which the new DStream will generate RDDs);
                           must be a multiple of this DStream's batching interval
    @param numPartitions:  number of partitions of each RDD in the new DStream.
    """
    keyed = self.map(lambda x: (x, 1))
    counted = keyed.reduceByKeyAndWindow(operator.add, operator.sub,
                                         windowDuration, slideDuration,
                                         numPartitions)
    return counted.filter(lambda kv: kv[1] > 0)
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
    """
    Merge the values for each key using an associative and commutative
    reduce function.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.

    Output will be partitioned with C{numPartitions} partitions, or
    the default parallelism level if C{numPartitions} is not specified.
    Default partitioner is hash-partition.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKey(add).collect())
    [('a', 2), ('b', 1)]
    """
    return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
    """
    Merge the values for each key using an associative and commutative
    reduce function, but return the results immediately to the master as
    a dictionary.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKeyLocally(add).items())
    [('a', 2), ('b', 1)]
    """
    def reducePartition(iterator):
        m = {}
        for k, v in iterator:
            m[k] = func(m[k], v) if k in m else v
        yield m

    def mergeMaps(m1, m2):
        for k, v in m2.items():
            m1[k] = func(m1[k], v) if k in m1 else v
        return m1

    return self.mapPartitions(reducePartition).reduce(mergeMaps)
def fullOuterJoin(self, other, numPartitions=None):
    """
    Perform a full outer join of C{self} and C{other}.

    For each element (k, v) in C{self}, the resulting RDD will either
    contain all pairs (k, (v, w)) for w in C{other}, or the pair
    (k, (v, None)) if no elements in C{other} have key k.

    Similarly, for each element (k, w) in C{other}, the resulting RDD will
    either contain all pairs (k, (v, w)) for v in C{self}, or the pair
    (k, (None, w)) if no elements in C{self} have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2), ("c", 8)])
    >>> sorted(x.fullOuterJoin(y).collect())
    [('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
    """
    return python_full_outer_join(self, other, numPartitions)

# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def createSummary(self, blocks):
    if all(isinstance(block, Block) for block in blocks):
        utxos = []
        for blk in blocks:
            for transaction in blk.transactions:
                utxos += transaction.utxos
        outgoing = [(utxo.sender, utxo.value) for utxo in utxos]
        incoming = [(utxo.receiver, utxo.value) for utxo in utxos]
        senders = set([utxo.sender for utxo in utxos])
        receivers = set([utxo.receiver for utxo in utxos])
        for wallet in list(senders) + list(receivers):
            self.changes[wallet] = 0
        for sender in senders:
            self.changes[sender] = -sum(map(lambda v: v[1],
                                            filter(lambda u: u[0] == sender, outgoing)))
        for receiver in receivers:
            self.changes[receiver] += sum(map(lambda v: v[1],
                                              filter(lambda u: u[0] == receiver, incoming)))
    elif all(isinstance(block, SummaryBlock) for block in blocks):
        all_keys = reduce(operator.add,
                          [list(block.changes.keys()) for block in blocks])
        for key in all_keys:
            self.changes[key] = 0
        for block in blocks:
            for key, value in block.changes.items():
                self.changes[key] += value
    else:
        raise TypeError('Invalid typing of blocks')
def test_reduceby(self):
    data = [1, 2, 3, 4, 5]

    def iseven(x):
        return x % 2 == 0

    assert reduceby(iseven, add, data, 0) == {False: 9, True: 6}
    assert reduceby(iseven, mul, data, 1) == {False: 15, True: 8}

    projects = [{'name': 'build roads', 'state': 'CA', 'cost': 1000000},
                {'name': 'fight crime', 'state': 'IL', 'cost': 100000},
                {'name': 'help farmers', 'state': 'IL', 'cost': 2000000},
                {'name': 'help farmers', 'state': 'CA', 'cost': 200000}]
    assert reduceby(lambda x: x['state'],
                    lambda acc, x: acc + x['cost'],
                    projects, 0) == {'CA': 1200000, 'IL': 2100000}
    assert reduceby('state',
                    lambda acc, x: acc + x['cost'],
                    projects, 0) == {'CA': 1200000, 'IL': 2100000}
def test_join(self):
    names = [(1, 'one'), (2, 'two'), (3, 'three')]
    fruit = [('apple', 1), ('orange', 1), ('banana', 2), ('coconut', 2)]

    def addpair(pair):
        return pair[0] + pair[1]

    result = set(starmap(add, join(first, names, second, fruit)))

    expected = {((1, 'one', 'apple', 1)),
                ((1, 'one', 'orange', 1)),
                ((2, 'two', 'banana', 2)),
                ((2, 'two', 'coconut', 2))}

    assert result == expected

    result = set(starmap(add, join(first, names, second, fruit,
                                   left_default=no_default2,
                                   right_default=no_default2)))
    assert result == expected
def create_batches(data, batch_size, padding_id, label=True, sort=True, shuffle=True):
    if label:
        for d in data:
            assert d[1] != -1
    if sort:
        data = sorted(data, key=lambda x: len(x[0]), reverse=True)
    batches = []
    for i in xrange(0, len(data), batch_size):
        # idxs, idys
        input_lst = create_input(data[i:i+batch_size], padding_id)
        batches.append(input_lst)
    if shuffle:
        idx = np.random.permutation(len(batches))
        new_batches = [batches[i] for i in idx]
        new_data = reduce(operator.add,
                          [data[i*batch_size:(i+1)*batch_size] for i in idx])
        batches, data = new_batches, new_data
        assert len(new_data) == len(data)
    if not label:
        # set all label to 0
        for b in batches:
            b[1][:] = 0
    return batches, data
def recursive_repr(func):
    """Decorator to prevent infinite repr recursion."""
    repr_running = set()

    @wraps(func)
    def wrapper(self):
        key = id(self), get_ident()

        if key in repr_running:
            return '...'

        repr_running.add(key)

        try:
            return func(self)
        finally:
            repr_running.discard(key)

    return wrapper
def add(self, val):
    """Add the element *val* to the list."""
    _maxes, _lists = self._maxes, self._lists

    if _maxes:
        pos = bisect_right(_maxes, val)

        if pos == len(_maxes):
            pos -= 1
            _maxes[pos] = val
            _lists[pos].append(val)
        else:
            insort(_lists[pos], val)

        self._expand(pos)
    else:
        _maxes.append(val)
        _lists.append([val])

    self._len += 1
def update(self, iterable):
    """Update the list by adding all elements from *iterable*."""
    _maxes, _lists = self._maxes, self._lists
    values = sorted(iterable)

    if _maxes:
        if len(values) * 4 >= self._len:
            values.extend(chain.from_iterable(_lists))
            values.sort()
            self.clear()
        else:
            _add = self.add
            for val in values:
                _add(val)
            return

    _load, _index = self._load, self._index
    _lists.extend(values[pos:(pos + _load)]
                  for pos in range(0, len(values), _load))
    _maxes.extend(sublist[-1] for sublist in _lists)
    self._len = len(values)
    del _index[:]
def update(self, iterable):
    """Update the list by adding all elements from *iterable*."""
    _maxes, _lists, _keys = self._maxes, self._lists, self._keys
    values = sorted(iterable, key=self._key)

    if _maxes:
        if len(values) * 4 >= self._len:
            values.extend(chain.from_iterable(_lists))
            values.sort(key=self._key)
            self.clear()
        else:
            _add = self.add
            for val in values:
                _add(val)
            return

    _load, _index = self._load, self._index
    _lists.extend(values[pos:(pos + _load)]
                  for pos in range(0, len(values), _load))
    _keys.extend(list(map(self._key, _list)) for _list in _lists)
    _maxes.extend(sublist[-1] for sublist in _keys)
    self._len = len(values)
    del _index[:]
def _columns_plus_names(self):
    if self.use_labels:
        names = set()

        def name_for_col(c):
            if c._label is None:
                return (None, c)

            name = c._label
            if name in names:
                name = c.anon_label
            else:
                names.add(name)
            return name, c

        return [
            name_for_col(c)
            for c in util.unique_list(_select_iterables(self._raw_columns))
        ]
    else:
        return [
            (None, c)
            for c in util.unique_list(_select_iterables(self._raw_columns))
        ]
def operate(self, left, right, operation):
    """Do operation on colors
    args:
        left (str): left side
        right (str): right side
        operation (str): Operation
    returns:
        str
    """
    operation = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv
    }.get(operation)
    return operation(left, right)
def mark_todelete(cls, drive_id):
    """Marks a PD for deletion.
    Also creates a new PD with the same 'name' and owner, but with a
    different physical 'drive_name'. This makes it possible to quickly
    relaunch the pod right after the PD is deleted. If the same physical
    drive name were reused, we would have to wait until the old drive is
    actually deleted.
    """
    pd = cls.query.filter(cls.id == drive_id, cls.pod_id.is_(None)).first()
    if not pd or pd.state == PersistentDiskStatuses.TODELETE:
        return
    new_drive_name = cls._increment_drive_name(pd)
    old_name = pd.name
    # change name for deleting PD to prevent conflict of uniques and
    # to hide PD from search by name
    pd.name = uuid.uuid4().hex
    pd.state = PersistentDiskStatuses.TODELETE
    db.session.flush()
    new_pd = cls(
        drive_name=new_drive_name, name=old_name, owner_id=pd.owner_id,
        size=pd.size, state=PersistentDiskStatuses.DELETED
    )
    db.session.add(new_pd)
    db.session.commit()
    return new_pd
def scanr(col, func=add, acc=None):
    '''
    Use a given accumulator value to build a list of values obtained
    by repeatedly applying acc = func(next(list), acc) from the right.

    WARNING: Right folds and scans will blow up for infinite generators!
    '''
    try:
        col = reversed(col)
    except TypeError:
        col = reversed(list(col))

    if acc is not None:
        col = chain([acc], col)

    return list(itools.accumulate(col, func))
def howFarAreWe(model, against, sizeAgainst):
    # Ignore the rating column
    againstNoRatings = against.map(lambda x: (int(x[0]), int(x[1])))
    # Keep the rating to compare against
    againstWiRatings = against.map(lambda x: ((int(x[0]), int(x[1])), int(x[2])))
    # Make a prediction and map it for later comparison
    # The map has to be ((user, product), rating) not ((product, user), rating)
    predictions = model.predictAll(againstNoRatings).map(lambda p: ((p[0], p[1]), p[2]))
    # Returns the pairs (prediction, rating)
    predictionsAndRatings = predictions.join(againstWiRatings).values()
    # Returns the root of the mean squared error
    return sqrt(predictionsAndRatings.map(lambda s: (s[0] - s[1]) ** 2).reduce(add) /
                float(sizeAgainst))
# [END how_far]


# Read the data from the Cloud SQL
# Create dataframes
def run(self, job, computation, node):

    def _run(self, task=None):
        self.task.send({'req': 'run', 'auth': computation._auth, 'job': job,
                        'client': task})
        rtask = yield task.receive(timeout=MsgTimeout)
        # currently fault tolerance is not supported, so clear job's
        # args to save space
        job.args = job.kwargs = None
        if isinstance(rtask, Task):
            # TODO: keep func too for fault-tolerance
            job.done = pycos.Event()
            self.rtasks[rtask] = (rtask, job)
            if self.askew_results:
                msg = self.askew_results.pop(rtask, None)
                if msg:
                    Scheduler.__status_task.send(msg)
        else:
            logger.debug('failed to create rtask: %s', rtask)
            if job.cpu:
                self.avail.set()
                node.cpus_used -= 1
                node.load = float(node.cpus_used) / len(node.servers)
                self.scheduler._avail_nodes.add(node)
                self.scheduler._nodes_avail.set()
                node.avail.set()
        raise StopIteration(rtask)

    rtask = yield SysTask(_run, self).finish()
    job.client.send(rtask)
def status(self):
    pending = sum(node.cpus_used for node in self._nodes.itervalues())
    servers = reduce(operator.add, [node.servers.keys()
                                    for node in self._nodes.itervalues()], [])
    return {'Client': self._cur_computation._pulse_task.location
                      if self._cur_computation else '',
            'Pending': pending, 'Nodes': self._nodes.keys(), 'Servers': servers
            }
def status(self):
    pending = sum(node.cpus_used for node in self._nodes.values())
    servers = functools.reduce(operator.add, [list(node.servers.keys())
                                              for node in self._nodes.values()], [])
    return {'Client': self._cur_computation._pulse_task.location
                      if self._cur_computation else '',
            'Pending': pending, 'Nodes': list(self._nodes.keys()), 'Servers': servers
            }
def __add__(self, trc):
    return self.apply_op2(trc, operator.add)
def __init__(self, capacity):
    super(SumSegmentTree, self).__init__(
        capacity=capacity,
        operation=operator.add,
        neutral_element=0.0
    )
def test_preduce_one_process(self):
    """ Test that preduce reduces to functools.reduce for a single process """
    integers = list(range(0, 10))
    preduce_results = preduce(add, integers, processes=1)
    reduce_results = reduce(add, integers)

    self.assertEqual(preduce_results, reduce_results)
def test_preduce_multiple_processes(self):
    """ Test that preduce gives the same result as functools.reduce when using multiple processes """
    integers = list(range(0, 10))
    preduce_results = preduce(add, integers, processes=2)
    reduce_results = reduce(add, integers)

    self.assertEqual(preduce_results, reduce_results)
def test_on_numpy_arrays(self):
    """ Test sum of numpy arrays as parallel reduce """
    arrays = [np.zeros((32, 32)) for _ in range(10)]
    s = preduce(add, arrays, processes=2)

    self.assertTrue(np.allclose(s, arrays[0]))
def equalize(image, mask=None):
    """
    Equalize the image histogram. This function applies a non-linear
    mapping to the input image, in order to create a uniform
    distribution of grayscale values in the output image.

    :param image: The image to equalize.
    :param mask: An optional mask.  If given, only the pixels selected by
                 the mask are included in the analysis.
    :return: An image.
    """
    if image.mode == "P":
        image = image.convert("RGB")
    h = image.histogram(mask)
    lut = []
    for b in range(0, len(h), 256):
        histo = [_f for _f in h[b:b+256] if _f]
        if len(histo) <= 1:
            lut.extend(list(range(256)))
        else:
            step = (functools.reduce(operator.add, histo) - histo[-1]) // 255
            if not step:
                lut.extend(list(range(256)))
            else:
                n = step // 2
                for i in range(256):
                    lut.append(n // step)
                    n = n + h[i+b]
    return _lut(image, lut)
def _getcount(self):
    "Get total number of pixels in each layer"
    v = []
    for i in range(0, len(self.h), 256):
        v.append(functools.reduce(operator.add, self.h[i:i+256]))
    return v
def addc(a: int, b: int) -> int:
    return operator.add(a, b)