Python six.moves.cPickle module: dumps() example source code

The following 48 code examples, extracted from open-source Python projects, illustrate how to use six.moves.cPickle.dumps().
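
For orientation, here is a minimal round-trip sketch (variable names are illustrative): six.moves.cPickle resolves to the cPickle module on Python 2 and to the C-accelerated pickle module on Python 3, so dumps()/loads() behave the same on both.

from six.moves import cPickle

# serialize any picklable object to bytes, then restore it
payload = {"weights": [0.1, 0.2], "epoch": 3}
blob = cPickle.dumps(payload, protocol=cPickle.HIGHEST_PROTOCOL)
restored = cPickle.loads(blob)
assert restored == payload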

Project: yt | Author: yt-project
def __new__(cls, filename=None, *args, **kwargs):
        if not isinstance(filename, string_types):
            obj = object.__new__(cls)
            # The Stream frontend uses a StreamHandler object to pass metadata
            # to __init__.
            is_stream = (hasattr(filename, 'get_fields') and
                         hasattr(filename, 'get_particle_type'))
            if not is_stream:
                obj.__init__(filename, *args, **kwargs)
            return obj
        apath = os.path.abspath(filename)
        cache_key = (apath, cPickle.dumps(args), cPickle.dumps(kwargs))
        if ytcfg.getboolean("yt","skip_dataset_cache"):
            obj = object.__new__(cls)
        elif cache_key not in _cached_datasets:
            obj = object.__new__(cls)
            if obj._skip_cache is False:
                _cached_datasets[cache_key] = obj
        else:
            obj = _cached_datasets[cache_key]
        return obj
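The example above keys its dataset cache on cPickle.dumps(args) and cPickle.dumps(kwargs), which turns arbitrary, even unhashable, arguments into hashable bytes. One caveat, sketched below with a hypothetical helper: on interpreters without insertion-ordered dicts, two equal kwargs dicts can pickle to different byte strings, so pickling a sorted view of the items is safer.

from six.moves import cPickle

def make_cache_key(apath, args, kwargs):
    # Hypothetical variant: sorting kwargs items first makes the pickled
    # key independent of dict ordering.
    return (apath, cPickle.dumps(args),
            cPickle.dumps(tuple(sorted(kwargs.items()))))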
Project: chainer-deconv | Author: germanRos
def check_pickling(self, x_data):
        x = chainer.Variable(x_data)
        y = self.link(x)
        y_data1 = y.data

        del x, y

        pickled = pickle.dumps(self.link, -1)
        del self.link
        self.link = pickle.loads(pickled)

        x = chainer.Variable(x_data)
        y = self.link(x)
        y_data2 = y.data

        gradient_check.assert_allclose(y_data1, y_data2, atol=0, rtol=0)
Project: cloud-custodian | Author: capitalone
def save(self, key, data):
        try:
            with open(self.cache_path, 'wb') as fh:
                self.data[pickle.dumps(key)] = data
                pickle.dump(self.data, fh, protocol=2)
        except Exception as e:
            log.warning("Could not save cache %s err: %s" % (
                self.cache_path, e))
            if not os.path.exists(self.cache_path):
                directory = os.path.dirname(self.cache_path)
                log.info('Generating Cache directory: %s.' % directory)
                try:
                    os.makedirs(directory)
                except Exception as e:
                    log.warning("Could not create directory: %s err: %s" % (
                        directory, e))
Project: odin | Author: imito
def update(self, items):
    if self.read_only:
      return
    query = """UPDATE {tb} SET value=(?) WHERE key=("?");"""
    if isinstance(items, Mapping):
      items = items.items()
    # ====== check if update is in cache ====== #
    db_update = []
    for key, value in items:
      key = str(key)
      if key in self.current_cache:
        self.current_cache[key] = value
      else:
        db_update.append((marshal.dumps(value), key))
    # ====== perform DB update ====== #
    self.cursor.executemany(query.format(tb=self._current_table), db_update)
    self.connection.commit()
    return self
Project: odin | Author: imito
def test_complex_transform(self):
        with TemporaryDirectory() as temp:
            from sklearn.pipeline import Pipeline
            path = os.path.join(temp, 'audio.sph')
            urlretrieve(filename=path,
                        url='https://s3.amazonaws.com/ai-datasets/sw02001.sph')
            f = Pipeline([
                ('step1', model.SpeechTransform('mspec', fs=8000, vad=True)),
                ('step2', model.Transform(lambda x: (x[0][:, :40],
                                                     x[1].astype(str)))),
                ('step3', model.Transform(lambda x: (np.sum(x[0]),
                                                    ''.join(x[1].tolist()))))
            ])
            x = f.transform(path)
            f = cPickle.loads(cPickle.dumps(f))
            y = f.transform(path)
            self.assertEqual(x[0], y[0])
            self.assertEqual(y[0], -3444229.0)
            self.assertEqual(x[1], y[1])
Project: sqlalchemy-validation | Author: blazelibs
def test_pickling(self):
        so = ex.SomeObj(minlen=5)
        assert so._sav.entity.minlen == 5
        pstr = pickle.dumps(so)
        del so

        so2 = pickle.loads(pstr)
        assert so2._sav.entity.minlen == 5

        # make sure it's a weakref
        vh = so2._sav
        del so2
        gc.collect()

        try:
            vh.entity
            assert False, 'expected exception'
        except EntityRefMissing:
            pass
Project: chainer-glu | Author: musyoku
def check_pickling(self, x_data):
        x = chainer.Variable(x_data)
        y = self.link(x)
        y_data1 = y.data

        del x, y

        pickled = pickle.dumps(self.link, -1)
        del self.link
        self.link = pickle.loads(pickled)

        x = chainer.Variable(x_data)
        y = self.link(x)
        y_data2 = y.data

        testing.assert_allclose(y_data1, y_data2, atol=0, rtol=0)
Project: OpenMDAO | Author: OpenMDAO
def record_metadata_solver(self, recording_requester):
        """
        Record solver metadata.

        Parameters
        ----------
        recording_requester : <Solver>
            The Solver that would like to record its metadata.
        """
        path = recording_requester._system.pathname
        solver_class = type(recording_requester).__name__
        if not path:
            path = 'root'
        id = "{}.{}".format(path, solver_class)

        solver_options = pickle.dumps(recording_requester.options,
                                      pickle.HIGHEST_PROTOCOL)

        with self.con:
            self.con.execute(
                "INSERT INTO solver_metadata(id, solver_options, solver_class) "
                "VALUES(?,?,?)", (id, sqlite3.Binary(solver_options), solver_class))
Project: OpenMDAO | Author: OpenMDAO
def _record_driver_metadata(self, driver_class, model_viewer_data):
        """
        Record driver metadata.

        Parameters
        ----------
        driver_class : str
            The name of the driver type.
        model_viewer_data : JSON Object
            All model viewer data, including variable names and relationships.
        """
        driver_metadata_dict = {
            'id': driver_class,
            'model_viewer_data': model_viewer_data
        }
        driver_metadata = json.dumps(driver_metadata_dict)

        requests.post(self._endpoint + '/' + self._case_id + '/driver_metadata',
                      data=driver_metadata, headers=self._headers)
Project: pywren | Author: pywren
def test_map(self):

        def plus_one(x):
            return x + 1
        N = 10

        x = np.arange(N)
        futures_original = self.wrenexec.map(plus_one, x)
        futures_str = pickle.dumps(futures_original)
        futures = pickle.loads(futures_str)

        result_count = 0
        while result_count < N:

            fs_dones, fs_notdones = pywren.wait(futures)
            result_count = len(fs_dones)

        res = np.array([f.result() for f in futures])
        np.testing.assert_array_equal(res, x + 1)
Project: deb-python-cassandra-driver | Author: openstack
def _serialize_key(self, key):
        return cPickle.dumps(key)
Project: pybel | Author: pybel
def to_bytes(graph, protocol=HIGHEST_PROTOCOL):
    """Converts a graph to bytes with pickle. Note that the pickle module has some incompatibilities between Python
    2 and 3. To export a universally importable pickle, choose 0, 1, or 2.

    :param BELGraph graph: A BEL network
    :param int protocol: Pickling protocol to use
    :return: Pickled bytes representing the graph
    :rtype: bytes

    .. seealso:: https://docs.python.org/3.6/library/pickle.html#data-stream-format
    """
    raise_for_not_bel(graph)
    return dumps(graph, protocol=protocol)
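As the docstring says, protocols 0, 1, and 2 produce bytes loadable from both Python 2 and 3; a short usage sketch (the graph value is assumed to be a BELGraph):

# portable across Python 2 and 3, at the cost of a larger payload
portable_blob = to_bytes(graph, protocol=2)

# most compact and fastest, but only loadable by an equal-or-newer pickle
default_blob = to_bytes(graph)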
Project: serialtime | Author: ianlini
def save_pklgz(obj, path, log_description=None, logger=None,
               logging_level=logging.INFO, verbose_start=True,
               verbose_end=True, end_in_new_line=True, log_prefix="..."):
    if log_description is None:
        log_description = "Pickling to " + (path)
    with SimpleTimer(log_description, logger, logging_level, verbose_start,
                     verbose_end, end_in_new_line, log_prefix):
        pkl = cPickle.dumps(obj, protocol=cPickle.HIGHEST_PROTOCOL)
        with gzip.open(path, "wb") as fp:
            fp.write(pkl)
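The matching loader is not part of this excerpt; a minimal counterpart sketch under the same conventions (the name load_pklgz is hypothetical):

import gzip

from six.moves import cPickle

def load_pklgz(path):
    # read back the gzip-compressed pickle written by save_pklgz
    with gzip.open(path, "rb") as fp:
        return cPickle.loads(fp.read())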
Project: rltools | Author: sisl
def _dumps(o):
    return cPickle.dumps(o, protocol=-1)
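A negative protocol selects the highest protocol the running interpreter supports, so the one-liner above is equivalent to:

cPickle.dumps(o, protocol=cPickle.HIGHEST_PROTOCOL)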
Project: chainer-deconv | Author: germanRos
def test_pickle_cpu(self):
        fs2_serialized = pickle.dumps(self.fs2)
        fs2_loaded = pickle.loads(fs2_serialized)
        self.assertTrue((self.fs2.b.p.data == fs2_loaded.b.p.data).all())
        self.assertTrue(
            (self.fs2.fs1.a.p.data == fs2_loaded.fs1.a.p.data).all())
Project: chainer-deconv | Author: germanRos
def test_pickle_gpu(self):
        self.fs2.to_gpu()
        fs2_serialized = pickle.dumps(self.fs2)
        fs2_loaded = pickle.loads(fs2_serialized)
        fs2_loaded.to_cpu()
        self.fs2.to_cpu()

        self.assertTrue((self.fs2.b.p.data == fs2_loaded.b.p.data).all())
        self.assertTrue(
            (self.fs2.fs1.a.p.data == fs2_loaded.fs1.a.p.data).all())
Project: chainer-deconv | Author: germanRos
def test_pickle_cpu(self):
        s = pickle.dumps(self.fs)
        fs2 = pickle.loads(s)
        self.check_equal_fs(self.fs, fs2)
Project: chainer-deconv | Author: germanRos
def test_pickle_gpu(self):
        self.fs.to_gpu()
        s = pickle.dumps(self.fs)
        fs2 = pickle.loads(s)

        self.fs.to_cpu()
        fs2.to_cpu()
        self.check_equal_fs(self.fs, fs2)
Project: cloud-custodian | Author: capitalone
def get(self, key):
        return self.data.get(pickle.dumps(key))
Project: cloud-custodian | Author: capitalone
def save(self, key, data):
        self.data[pickle.dumps(key)] = data
Project: cloud-custodian | Author: capitalone
def get(self, key):
        k = pickle.dumps(key)
        return self.data.get(k)
Project: cloud-custodian | Author: capitalone
def test_get(self):
        # mock the pickled cache data and assign it to the data variable
        test_pickle = pickle.dumps(
            {pickle.dumps(self.test_key): self.test_value}, protocol=2)
        self.test_cache.data = pickle.loads(test_pickle)

        # assert
        self.assertEqual(self.test_cache.get(self.test_key), self.test_value)
        self.assertEqual(self.test_cache.get(self.bad_key), None)
Project: odin | Author: imito
def __setstate__(self, states):
    path, read_only, cache_size = states
    if not os.path.exists(path):
      raise ValueError("Cannot find store NoSQL database at path: %s."
                       "If you have moved the database, the dumps from "
                       "cannot restore the previous intance." % path)
    self._restore_dict(path, read_only, cache_size)
    self._path = path
    self._read_only = read_only
    self._cache_size = cache_size
Project: odin | Author: imito
def _flush(self, save_all=False):
    curr_tab = self.current_table
    tables = self.get_all_tables() if save_all else [curr_tab]
    for tab in tables:
      self.set_table(tab)
      if len(self.current_cache) > 0:
        self.cursor.executemany(
            "INSERT INTO {tb} VALUES (?, ?)".format(tb=tab),
            [(str(k), marshal.dumps(v.tolist()) if isinstance(v, np.ndarray)
              else marshal.dumps(v))
             for k, v in self.current_cache.items()])
        self.connection.commit()
        self.current_cache.clear()
    # restore the last table
    return self.set_table(curr_tab)
Project: odin | Author: imito
def func_to_str(func):
  # convert the function's code object to bytes
  code = cPickle.dumps(array("B", marshal.dumps(func.__code__)),
                       protocol=cPickle.HIGHEST_PROTOCOL)
  closure = None
  if func.__closure__ is not None:
    print("[WARNING] function: %s contains closure, which cannot be "
          "serialized." % str(func))
    closure = tuple([c.cell_contents for c in func.__closure__])
  defaults = func.__defaults__
  return (code, closure, defaults)
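The inverse direction would unpickle the byte array, un-marshal the code object, and rebuild a function via types.FunctionType. A rough sketch that ignores closures and simply passes the defaults through (the name str_to_func is hypothetical):

import marshal
import types

from six.moves import cPickle

def str_to_func(saved, globs):
  # saved is the (code, closure, defaults) tuple produced by func_to_str
  code, closure, defaults = saved
  raw = cPickle.loads(code)                # array('B', ...) of marshal bytes
  code_obj = marshal.loads(raw.tobytes())  # use .tostring() on Python 2
  return types.FunctionType(code_obj, globs, code_obj.co_name,
                            defaults, None)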
Project: odin | Author: imito
def __init__(self, func, *args, **kwargs):
    super(functionable, self).__init__()
    self._function = func
    self.__name__ = self._function.__name__
    try: # sometimes the source cannot be retrieved
      self._source = inspect.getsource(self._function)
    except Exception as e:
      print("[WARNING] Cannot get source code of function:", func,
            "(error:%s)" % str(e))
      self._source = None
    # try to pickle the function directly
    try:
      self._sandbox = cPickle.dumps(self._function,
          protocol=cPickle.HIGHEST_PROTOCOL)
    except Exception:
      self._sandbox = _serialize_function_sandbox(func, self._source)
    # ====== store argsmap ====== #
    argspec = inspect.getargspec(func)
    argsmap = OrderedDict([(i, _ArgPlaceHolder_()) for i in argspec.args])
    # store defaults
    if argspec.defaults is not None:
      for name, arg in zip(argspec.args[::-1], argspec.defaults[::-1]):
        argsmap[name] = arg
    # update positional arguments
    for name, arg in zip(argspec.args, args):
      argsmap[name] = arg
    # update kw arguments
    argsmap.update(kwargs)
    self._argsmap = argsmap

  # ==================== Pickling methods ==================== #
Project: odin | Author: imito
def is_pickleable(x):
  try:
    cPickle.dumps(x, protocol=cPickle.HIGHEST_PROTOCOL)
    return True
  except cPickle.PickleError:
    return False
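Note that dumps() can also signal failure with TypeError or AttributeError, depending on the object and Python version, rather than a PickleError subclass, so a broader guard is sometimes warranted; a hedged variant:

def is_pickleable_strict(x):
  try:
    cPickle.dumps(x, protocol=cPickle.HIGHEST_PROTOCOL)
    return True
  except (cPickle.PickleError, TypeError, AttributeError):
    return False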
Project: batchup | Author: Britefury
def _serialise_args(shared_objects, args):  # pragma: no cover
    serialised_args = []
    for arg in args:
        if isinstance(arg, _SharedConstant):
            value = arg.value
            key = id(value)
            if key not in shared_objects:
                shared_objects[key] = dumps(value)
            ref = _SharedRef(key=key)
            serialised = ref
        else:
            serialised = arg
        serialised_args.append(serialised)
    return tuple(serialised_args)
Project: Theano-Deep-learning | Author: GeekLiB
def test_pickle_unpickle_with_reoptimization():
    mode = theano.config.mode
    if mode in ["DEBUG_MODE", "DebugMode"]:
        mode = "FAST_RUN"
    x1 = T.fmatrix('x1')
    x2 = T.fmatrix('x2')
    x3 = theano.shared(numpy.ones((10, 10), dtype=floatX))
    x4 = theano.shared(numpy.ones((10, 10), dtype=floatX))
    y = T.sum(T.sum(T.sum(x1 ** 2 + x2) + x3) + x4)

    updates = OrderedDict()
    updates[x3] = x3 + 1
    updates[x4] = x4 + 1
    f = theano.function([x1, x2], y, updates=updates, mode=mode)

    # now pickle the compiled theano fn
    string_pkl = pickle.dumps(f, -1)

    in1 = numpy.ones((10, 10), dtype=floatX)
    in2 = numpy.ones((10, 10), dtype=floatX)

    # test unpickle with optimization
    default = theano.config.reoptimize_unpickled_function
    try:
        # the default is True
        theano.config.reoptimize_unpickled_function = True
        f_ = pickle.loads(string_pkl)
        assert f(in1, in2) == f_(in1, in2)
    finally:
        theano.config.reoptimize_unpickled_function = default
Project: Theano-Deep-learning | Author: GeekLiB
def test_pickle_unpickle_without_reoptimization():
    mode = theano.config.mode
    if mode in ["DEBUG_MODE", "DebugMode"]:
        mode = "FAST_RUN"
    x1 = T.fmatrix('x1')
    x2 = T.fmatrix('x2')
    x3 = theano.shared(numpy.ones((10, 10), dtype=floatX))
    x4 = theano.shared(numpy.ones((10, 10), dtype=floatX))
    y = T.sum(T.sum(T.sum(x1**2 + x2) + x3) + x4)

    updates = OrderedDict()
    updates[x3] = x3 + 1
    updates[x4] = x4 + 1
    f = theano.function([x1, x2], y, updates=updates, mode=mode)

    # now pickle the compiled theano fn
    string_pkl = pickle.dumps(f, -1)

    # compute f value
    in1 = numpy.ones((10, 10), dtype=floatX)
    in2 = numpy.ones((10, 10), dtype=floatX)

    # test unpickle without optimization
    default = theano.config.reoptimize_unpickled_function
    try:
        # the default is True
        theano.config.reoptimize_unpickled_function = False
        f_ = pickle.loads(string_pkl)
        assert f(in1, in2) == f_(in1, in2)
    finally:
        theano.config.reoptimize_unpickled_function = default
Project: Theano-Deep-learning | Author: GeekLiB
def test_pickle_bug(self):
        # Regression test for bug fixed in 24d4fd291054.
        o = Prod()
        s = pickle.dumps(o, protocol=-1)
        o = pickle.loads(s)
        pickle.dumps(o)
Project: Theano-Deep-learning | Author: GeekLiB
def test_pickle(self):
        a = T.scalar()  # the a is for 'anonymous' (un-named).
        x, s = T.scalars('xs')

        f = function([x, In(a, value=1.0, name='a'),
                      In(s, value=0.0, update=s + a * x, mutable=True)], s + a * x)

        try:
            # Note that here we also test protocol 0 on purpose, since it
            # should work (even though one should not use it).
            g = pickle.loads(pickle.dumps(f, protocol=0))
            g = pickle.loads(pickle.dumps(f, protocol=-1))
        except NotImplementedError as e:
            if e[0].startswith('DebugMode is not picklable'):
                return
            else:
                raise
        # if they both return, assume  that they return equivalent things.
        # print [(k,id(k)) for k in f.finder.keys()]
        # print [(k,id(k)) for k in g.finder.keys()]

        self.assertFalse(g.container[0].storage is f.container[0].storage)
        self.assertFalse(g.container[1].storage is f.container[1].storage)
        self.assertFalse(g.container[2].storage is f.container[2].storage)
        self.assertFalse(x in g.container)
        self.assertFalse(x in g.value)

        self.assertFalse(g.value[1] is f.value[1])  # should not have been copied
        self.assertFalse(g.value[2] is f.value[2])  # should have been copied because it is mutable.
        self.assertFalse((g.value[2] != f.value[2]).any())  # its contents should be identical

        self.assertTrue(f(2, 1) == g(2))  # they should be in sync, default value should be copied.
        self.assertTrue(f(2, 1) == g(2))  # they should be in sync, default value should be copied.
        f(1, 2)  # put them out of sync
        self.assertFalse(f(1, 2) == g(1, 2))  # they should not be equal anymore.
Project: OpenMDAO | Author: OpenMDAO
def startup(self, recording_requester):
        """
        Prepare for a new run and create/update the abs2prom and prom2abs variables.

        Parameters
        ----------
        recording_requester :
            Object to which this recorder is attached.
        """
        super(SqliteRecorder, self).startup(recording_requester)

        # grab the system
        if isinstance(recording_requester, Driver):
            system = recording_requester._problem.model
        elif isinstance(recording_requester, System):
            system = recording_requester
        else:
            system = recording_requester._system

        # merge current abs2prom and prom2abs with this system's version
        for io in ['input', 'output']:
            for v in system._var_abs2prom[io]:
                self._abs2prom[io][v] = system._var_abs2prom[io][v]
            for v in system._var_allprocs_prom2abs_list[io]:
                if v not in self._prom2abs[io]:
                    self._prom2abs[io][v] = system._var_allprocs_prom2abs_list[io][v]
                else:
                    self._prom2abs[io][v] = list(set(self._prom2abs[io][v]) |
                                                 set(system._var_allprocs_prom2abs_list[io][v]))

        # store the updated abs2prom and prom2abs
        abs2prom = pickle.dumps(self._abs2prom)
        prom2abs = pickle.dumps(self._prom2abs)
        if self._open_close_sqlite:
            with self.con:
                self.con.execute("UPDATE metadata SET abs2prom=?, prom2abs=?",
                                 (abs2prom, prom2abs))
Project: OpenMDAO | Author: OpenMDAO
def record_metadata_driver(self, recording_requester):
        """
        Record driver metadata.

        Parameters
        ----------
        recording_requester : <Driver>
            The Driver that would like to record its metadata.
        """
        driver_class = type(recording_requester).__name__
        model_viewer_data = pickle.dumps(recording_requester._model_viewer_data,
                                         pickle.HIGHEST_PROTOCOL)
        with self.con:
            self.con.execute("INSERT INTO driver_metadata(id, model_viewer_data) VALUES(?,?)",
                             (driver_class, sqlite3.Binary(model_viewer_data)))
Project: OpenMDAO | Author: OpenMDAO
def record_metadata_system(self, recording_requester):
        """
        Record system metadata.

        Parameters
        ----------
        recording_requester : <System>
            The System that would like to record its metadata.
        """
        # Cannot handle PETScVector yet
        from openmdao.api import PETScVector
        if PETScVector and isinstance(recording_requester._outputs, PETScVector):
            return  # Cannot handle PETScVector yet

        # collect scaling arrays
        scaling_vecs = {}
        for kind, odict in iteritems(recording_requester._vectors):
            scaling_vecs[kind] = scaling = {}
            for vecname, vec in iteritems(odict):
                scaling[vecname] = vec._scaling
        scaling_factors = pickle.dumps(scaling_vecs,
                                       pickle.HIGHEST_PROTOCOL)
        path = recording_requester.pathname
        if not path:
            path = 'root'
        with self.con:
            self.con.execute("INSERT INTO system_metadata(id, scaling_factors) \
                              VALUES(?,?)",
                             (path, sqlite3.Binary(scaling_factors)))
Project: OpenMDAO | Author: OpenMDAO
def record_metadata_driver(self, recording_requester):
        """
        Record driver metadata.

        Parameters
        ----------
        recording_requester : <Driver>
            The Driver that would like to record its metadata.
        """
        driver_class = type(recording_requester).__name__
        model_viewer_data = json.dumps(recording_requester._model_viewer_data)
        self._record_driver_metadata(driver_class, model_viewer_data)
Project: kobo | Author: release-engineering
def test_pickle(self):
        self.test_file_name_property()
        name = "file"
        file1 = os.path.join(self.tmp_dir, name)
        wrap = FileWrapper(file1)
        pickled_data = pickle.dumps(wrap)
        wrap2 = pickle.loads(pickled_data)
        print(wrap2.file_path)
Project: kobo | Author: release-engineering
def test_pickle(self):
        rpm_version = [int(v) for v in getattr(rpm, '__version__', '0.0').split('.')]
        if rpm_version[0:2] < [4, 10]:
            warnings.warn('RPM header pickling unsupported in rpm %s' % rpm_version)
            return
        wrap = RpmWrapper(self.file_path)
        pickled_data = pickle.dumps(wrap)
        wrap2 = pickle.loads(pickled_data)
        self.assertEqual(wrap.name, wrap2.name)
Project: kobo | Author: release-engineering
def test_pickle(self):
        wrap = SimpleRpmWrapper(self.file_path)
        pickled_data = pickle.dumps(wrap)
        wrap2 = pickle.loads(pickled_data)
        self.assertEqual(wrap.name, wrap2.name)
Project: yelp_kafka | Author: Yelp
def __call__(self, *args, **kwargs):
        # If the function args cannot be used as a cache hash key, fail fast
        key = pickle.dumps((args, kwargs))
        try:
            return self.cache[key]
        except KeyError:
            value = self.func(*args, **kwargs)
            self.cache[key] = value
            return value
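Because the key is pickle.dumps((args, kwargs)), this memoizer also accepts unhashable arguments such as lists and dicts. A usage sketch with a hypothetical minimal host class around the __call__ above:

import pickle

class memoized(object):
    # hypothetical minimal wrapper matching the __call__ shown above
    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args, **kwargs):
        key = pickle.dumps((args, kwargs))
        try:
            return self.cache[key]
        except KeyError:
            value = self.func(*args, **kwargs)
            self.cache[key] = value
            return value

@memoized
def total(xs, scale=1):
    return sum(xs) * scale

total([1, 2, 3])  # computed once
total([1, 2, 3])  # served from the cache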
Project: python-dse-driver | Author: datastax
def _serialize_key(self, key):
        return cPickle.dumps(key)
Project: odin | Author: imito
def __init__(self, data_desc, dtype=None,
               batch_filter=None, batch_mode='batch',
               ncpu=1, buffer_size=8, hwm=86,
               mpi_backend='python'):
    super(Feeder, self).__init__(data=as_tuple(data_desc, t=DataDescriptor),
                                 read_only=True)
    # find intersection of all indices in DataDescriptor
    self._indices_keys = async(
        lambda: np.array(
            list(set.intersection(*[set(dat.indices.keys())
                                    for dat in self._data])),
            dtype=str)
    )()
    # ====== desire dtype ====== #
    nb_data = sum(len(dat._data) for dat in self._data)
    self._output_dtype = as_tuple(dtype, N=nb_data)
    # ====== Set default recipes ====== #
    self._recipes = RecipeList()
    self._recipes.set_feeder_info(nb_desc=len(self._data))
    self.set_multiprocessing(ncpu, buffer_size, hwm, mpi_backend)
    # ====== cache shape information ====== #
    # store first dimension
    self._cache_shape = None
    # if the recipes changed the shape need to be recalculated
    self._recipes_changed = False
    # ====== Iteration information ====== #
    self._running_iter = []
    # ====== batch mode ====== #
    if batch_filter is None:
      batch_filter = _dummy_batch_filter
    elif not hasattr(batch_filter, '__call__'):
      raise ValueError('batch_filter must be a function with 1 or 2 '
                       'parameters: (X) or (X, y).')
    # check that batch_filter is picklable
    try:
      cPickle.dumps(batch_filter, protocol=2)
    except Exception:
      raise ValueError("`batch_filter` must be pickle-able, which must be "
                       "top-level function.")

    self._batch_filter = batch_filter
    # check batch_mode
    batch_mode = str(batch_mode).lower()
    if batch_mode not in ("batch", 'file'):
      raise ValueError("Only support `batch_mode`: 'file'; 'batch', but "
                       "given value: '%s'" % batch_mode)
    self._batch_mode = batch_mode

  # ==================== pickling ==================== #
Project: odin | Author: imito
def _flush(self, save_all=False):
    """
    Parameters
    ----------
    save_all : bool
        Force the indices dictionary to be saved even if its accumulated
        size increase hasn't reached the maximum.
    """
    # check if closed or in read only mode
    if self.is_closed or self.read_only:
      return
    # ====== write new data ====== #
    # get old position
    file = self._file
    # start from header (i.e. "mmapdict")
    file.seek(len(MmapDict.HEADER))
    max_position = int(file.read(MmapDict.SIZE_BYTES))
    # ====== serialize the data ====== #
    # start from the previous max_position and append new values
    file.seek(max_position)
    for key, value in self._cache_dict.items():
      try:
        value = marshal.dumps(value)
      except ValueError:
        raise RuntimeError("Cannot marshal.dump %s" % str(value))
      self.indices[key] = (max_position, len(value))
      max_position += len(value)
      file.write(value)
      # increase indices size (in MegaBytes)
      self._increased_indices_size += (8 + 8 + len(key)) / 1024. / 1024.
    # ====== write the dumped indices ====== #
    indices_length = 0
    if save_all or \
    self._increased_indices_size > MmapDict.MAX_INDICES_SIZE:
      indices_dump = cPickle.dumps(self.indices,
                                   protocol=cPickle.HIGHEST_PROTOCOL)
      indices_length = len(indices_dump)
      file.write(indices_dump)
      self._increased_indices_size = 0.
    # ====== update the position ====== #
    # write new max size
    file.seek(len(MmapDict.HEADER))
    max_position = ('%' + str(MmapDict.SIZE_BYTES) + 'd') % max_position
    file.write(max_position.encode())
    # update length of pickled indices dictionary
    if indices_length > 0:
      indices_length = ('%' + str(MmapDict.SIZE_BYTES) + 'd') % indices_length
      file.write(indices_length.encode())
    # flush everything
    file.flush()
    # update the mmap
    self._mmap.close(); del self._mmap
    self._mmap = mmap.mmap(file.fileno(), length=0, offset=0,
                           flags=mmap.MAP_SHARED)
    # reset some values
    del self._cache_dict
    self._cache_dict = {}

  # ==================== I/O methods ==================== #
Project: Theano-Deep-learning | Author: GeekLiB
def test_optimizations_preserved(self):
        a = T.dvector()  # the a is for 'anonymous' (un-named).
        x = T.dvector('x')
        s = T.dvector('s')
        xm = T.dmatrix('x')
        sm = T.dmatrix('s')

        f = function([a, x, s, xm, sm], ((a.T.T) * (tensor.dot(xm, (sm.T.T.T)) + x).T * (x / x) + s))
        old_default_mode = config.mode
        old_default_opt = config.optimizer
        old_default_link = config.linker
        try:
            try:
                str_f = pickle.dumps(f, protocol=-1)
                config.mode = 'Mode'
                config.linker = 'py'
                config.optimizer = 'None'
                g = pickle.loads(str_f)
                # print g.maker.mode
                # print compile.mode.default_mode
            except NotImplementedError as e:
                if e[0].startswith('DebugMode is not pickl'):
                    g = 'ok'
        finally:
            config.mode = old_default_mode
            config.optimizer = old_default_opt
            config.linker = old_default_link

        if g == 'ok':
            return

        assert f.maker is not g.maker
        assert f.maker.fgraph is not g.maker.fgraph
        tf = f.maker.fgraph.toposort()
        tg = f.maker.fgraph.toposort()
        assert len(tf) == len(tg)
        for nf, ng in zip(tf, tg):
            assert nf.op == ng.op
            assert len(nf.inputs) == len(ng.inputs)
            assert len(nf.outputs) == len(ng.outputs)
            assert [i.type for i in nf.inputs] == [i.type for i in ng.inputs]
            assert [i.type for i in nf.outputs] == [i.type for i in ng.outputs]
Project: OpenMDAO | Author: OpenMDAO
def __init__(self, token, case_name='Case Recording',
                 endpoint='http://www.openmdao.org/visualization', port='', case_id=None,
                 suppress_output=False):
        """
        Initialize the OpenMDAOServerRecorder.

        Parameters
        ----------
        token : <string>
            The token to be passed as a user's unique identifier. Register to get a token
            at the given endpoint.
        case_name : <string>
            The name this case should be stored under. Default: 'Case Recording'.
        endpoint : <string>
            The URL (minus port, if not 80) where the server is hosted.
        port : <string>
            The port which the server is listening on. Defaults to the empty string (port 80).
        suppress_output : <bool>
            Indicates if all printing should be suppressed in this recorder.
        """
        super(WebRecorder, self).__init__()

        self.model_viewer_data = None
        self._headers = {'token': token, 'update': "False"}
        if port != '':
            self._endpoint = endpoint + ':' + port + '/case'
        else:
            self._endpoint = endpoint + '/case'

        self._abs2prom = {'input': {}, 'output': {}}
        self._prom2abs = {'input': {}, 'output': {}}

        if case_id is None:
            case_data_dict = {
                'case_name': case_name,
                'owner': 'temp_owner'
            }

            case_data = json.dumps(case_data_dict)

            # Post case and get Case ID
            case_request = requests.post(self._endpoint, data=case_data, headers=self._headers)
            response = case_request.json()
            if response['status'] != 'Failed':
                self._case_id = str(response['case_id'])
            else:
                self._case_id = '-1'
                if not suppress_output:
                    print("Failed to initialize case on server. No messages will be accepted \
                    from server for this case. Make sure you registered for a token at the \
                    given endpoint.")

                if 'reasoning' in response:
                    if not suppress_output:
                        print("Failure reasoning: " + response['reasoning'])
        else:
            self._case_id = str(case_id)
            self._headers['update'] = "True"
Project: OpenMDAO | Author: OpenMDAO
def _record_driver_iteration(self, counter, iteration_coordinate, success, msg,
                                 desvars, responses, objectives, constraints, sysincludes):
        """
        Record a driver iteration.

        Parameters
        ----------
        counter : int
            The global counter associated with this iteration.
        iteration_coordinate : str
            The iteration coordinate to identify this iteration.
        success : int
            Integer to indicate success.
        msg : str
            The metadata message.
        desvars : [JSON]
            The array of json objects representing the design variables.
        responses : [JSON]
            The array of json objects representing the responses.
        objectives : [JSON]
            The array of json objects representing the objectives.
        constraints : [JSON]
            The array of json objects representing the constraints.
        sysincludes : [JSON]
            The array of json objects representing the system variables explicitly included
            in the options.
        """
        driver_iteration_dict = {
            "counter": counter,
            "iteration_coordinate": iteration_coordinate,
            "success": success,
            "msg": msg,
            "desvars": [] if desvars is None else desvars,
            "responses": [] if responses is None else responses,
            "objectives": [] if objectives is None else objectives,
            "constraints": [] if constraints is None else constraints,
            "sysincludes": [] if sysincludes is None else sysincludes
        }

        global_iteration_dict = {
            'record_type': 'driver',
            'counter': counter
        }

        driver_iteration = json.dumps(driver_iteration_dict)
        global_iteration = json.dumps(global_iteration_dict)
        requests.post(self._endpoint + '/' + self._case_id + '/driver_iterations',
                      data=driver_iteration, headers=self._headers)
        requests.post(self._endpoint + '/' + self._case_id + '/global_iterations',
                      data=global_iteration, headers=self._headers)
Project: OpenMDAO | Author: OpenMDAO
def _record_system_iteration(self, counter, iteration_coordinate, success, msg,
                                 inputs, outputs, residuals):
        """
        Record a system iteration.

        Parameters
        ----------
        counter : int
            The global counter associated with this iteration.
        iteration_coordinate : str
            The iteration coordinate to identify this iteration.
        success : int
            Integer to indicate success.
        msg : str
            The metadata message.
        inputs : [JSON]
            The array of json objects representing the inputs.
        outputs : [JSON]
            The array of json objects representing the outputs.
        residuals : [JSON]
            The array of json objects representing the residuals.
        """
        system_iteration_dict = {
            'counter': counter,
            'iteration_coordinate': iteration_coordinate,
            'success': success,
            'msg': msg,
            'inputs': [] if inputs is None else inputs,
            'outputs': [] if outputs is None else outputs,
            'residuals': [] if residuals is None else residuals
        }

        global_iteration_dict = {
            'record_type': 'system',
            'counter': counter
        }

        system_iteration = json.dumps(system_iteration_dict)
        global_iteration = json.dumps(global_iteration_dict)

        requests.post(self._endpoint + '/' + self._case_id + '/system_iterations',
                      data=system_iteration, headers=self._headers)
        requests.post(self._endpoint + '/' + self._case_id + '/global_iterations',
                      data=global_iteration, headers=self._headers)
Project: OpenMDAO | Author: OpenMDAO
def _record_solver_iteration(self, counter, iteration_coordinate, success, msg,
                                 abs_error, rel_error, outputs, residuals):
        """
        Record a solver iteration.

        Parameters
        ----------
        counter : int
            The global counter associated with this iteration.
        iteration_coordinate : str
            The iteration coordinate to identify this iteration.
        success : int
            Integer to indicate success.
        msg : str
            The metadata message.
        abs_error : float
            The absolute error.
        rel_error : float
            The relative error.
        outputs : [JSON]
            The array of json objects representing the outputs.
        residuals : [JSON]
            The array of json objects representing the residuals.
        """
        solver_iteration_dict = {
            'counter': counter,
            'iteration_coordinate': iteration_coordinate,
            'success': success,
            'msg': msg,
            'abs_err': abs_error,
            'rel_err': rel_error,
            'solver_output': [] if outputs is None else outputs,
            'solver_residuals': [] if residuals is None else residuals
        }

        global_iteration_dict = {
            'record_type': 'solver',
            'counter': counter
        }

        solver_iteration = json.dumps(solver_iteration_dict)
        global_iteration = json.dumps(global_iteration_dict)

        requests.post(self._endpoint + '/' + self._case_id + '/solver_iterations',
                      data=solver_iteration, headers=self._headers)
        requests.post(self._endpoint + '/' + self._case_id + '/global_iterations',
                      data=global_iteration, headers=self._headers)