We extracted the following 47 code examples from open-source Python projects to illustrate how to use bson.SON.
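Before the examples, here is a minimal sketch (assuming only that pymongo/bson is installed) of why bson.SON is used instead of a plain dict: it keeps keys in insertion order, which MongoDB relies on for order-sensitive constructs such as $sort specifications and command documents.

# Minimal sketch: SON behaves like a dict but preserves insertion order,
# which matters for $sort specs and command documents.
from bson import SON

sort_spec = SON([("count", -1), ("_id", -1)])
sort_spec["extra"] = 1                # later insertions stay last

print(list(sort_spec.keys()))         # ['count', '_id', 'extra']
print(sort_spec.to_dict())            # plain-dict copy of the same data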
def from_json(self, json_data):
    """Convert from JSON."""
    mongo_data = json.loads(
        json_data,
        object_hook=generate_object_hook(self._document)
    )
    exclude = [
        name for (name, fld) in self._document._fields.items()
        if any([
            getattr(fld, "exclude_from_json", None),
            getattr(fld, "exclude_json", None)
        ])
    ]
    for item in mongo_data:
        for exc in exclude:
            item.pop(exc, None)
    return [
        self._document._from_son(bson.SON(data)) for data in mongo_data
    ]
def dumps(obj, *args, **kwargs):
    """Helper function that wraps :func:`json.dumps`.

    Recursive function that handles all BSON types including
    :class:`~bson.binary.Binary` and :class:`~bson.code.Code`.

    :Parameters:
      - `json_options`: A :class:`JSONOptions` instance used to modify the
        encoding of MongoDB Extended JSON types. Defaults to
        :const:`DEFAULT_JSON_OPTIONS`.

    .. versionchanged:: 3.4
       Accepts optional parameter `json_options`. See :class:`JSONOptions`.

    .. versionchanged:: 2.7
       Preserves order when rendering SON, Timestamp, Code, Binary, and
       DBRef instances.
    """
    json_options = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS)
    return json.dumps(_json_convert(obj, json_options), *args, **kwargs)
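For context, a short usage sketch of the dumps() helper above, assuming pymongo's bson.json_util module is available; the exact Extended JSON output differs between pymongo versions.

# Usage sketch (assumes pymongo is installed): json_util.dumps renders BSON
# types such as ObjectId, datetime and SON as MongoDB Extended JSON.
import datetime
from bson import SON, ObjectId
from bson import json_util

doc = SON([("_id", ObjectId()), ("when", datetime.datetime.utcnow())])
print(json_util.dumps(doc))  # e.g. {"_id": {"$oid": "..."}, "when": {"$date": ...}}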
def to_mongo(self, document, use_db_field=True, fields=None):
    id_field_name = self.document_type._meta['id_field']
    id_field = self.document_type._fields[id_field_name]

    if isinstance(document, Document):
        # We need the id from the saved object to create the DBRef
        id_ = document.pk
        if id_ is None:
            self.error('You can only reference documents once they have'
                       ' been saved to the database')
    else:
        self.error('Only accept a document object')
        # TODO: should raise here or will fail next statement

    value = SON((
        ('_id', id_field.to_mongo(id_)),
    ))

    if fields:
        new_fields = [f for f in self.fields if f in fields]
    else:
        new_fields = self.fields

    value.update(dict(document.to_mongo(use_db_field, fields=new_fields)))
    return value
def dumps(obj, *args, **kwargs):
    """Helper function that wraps :class:`json.dumps`.

    Recursive function that handles all BSON types including
    :class:`~bson.binary.Binary` and :class:`~bson.code.Code`.

    .. versionchanged:: 2.7
       Preserves order when rendering SON, Timestamp, Code, Binary, and
       DBRef instances.
    """
    return json.dumps(_json_convert(obj), *args, **kwargs)
def _json_convert(obj):
    """Recursive helper method that converts BSON types so they can be
    converted into json.
    """
    if hasattr(obj, 'iteritems') or hasattr(obj, 'items'):  # PY3 support
        return SON(((k, _json_convert(v)) for k, v in iteritems(obj)))
    elif hasattr(obj, '__iter__') and not isinstance(obj, (text_type, bytes)):
        return list((_json_convert(v) for v in obj))
    try:
        return default(obj)
    except TypeError:
        return obj
def _get_output(self, output):
    if isinstance(output, str) or isinstance(output, SON):
        out = output
    elif isinstance(output, dict):
        ordered_output = []
        for part in ('replace', 'merge', 'reduce'):
            value = output.get(part)
            if value:
                ordered_output.append((part, value))
                break
        else:
            raise OperationError("actionData not specified for output")

        db_alias = output.get('db_alias')
        remaining_args = ['db', 'sharded', 'nonAtomic']

        if db_alias:
            ordered_output.append(('db', get_db(db_alias).name))
            del remaining_args[0]

        for part in remaining_args:
            value = output.get(part)
            if value:
                ordered_output.append((part, value))

        out = SON(ordered_output)
    else:
        raise ConfusionError('Bad output type {!r}'.format(type(output)))

    return out
def dumps(obj, *args, **kwargs):
    """Helper function that wraps :class:`json.dumps`.

    Recursive function that handles all BSON types including
    :class:`~bson.binary.Binary` and :class:`~bson.code.Code`.

    .. versionchanged:: 2.7
       Preserves order when rendering SON, Timestamp, Code, Binary, and
       DBRef instances. (But not in Python 2.4.)
    """
    if not json_lib:
        raise Exception("No json library available")
    return json.dumps(_json_convert(obj), *args, **kwargs)
def _json_convert(obj):
    """Recursive helper method that converts BSON types so they can be
    converted into json.
    """
    if hasattr(obj, 'iteritems') or hasattr(obj, 'items'):  # PY3 support
        return SON(((k, _json_convert(v)) for k, v in obj.iteritems()))
    elif hasattr(obj, '__iter__') and not isinstance(obj, string_types):
        return list((_json_convert(v) for v in obj))
    try:
        return default(obj)
    except TypeError:
        return obj
def _json_convert(obj):
    """Recursive helper method that converts BSON types so they can be
    converted into json.
    """
    if hasattr(obj, 'iteritems') or hasattr(obj, 'items'):  # PY3 support
        return SON(((k, _json_convert(v)) for k, v in obj.items()))
    elif hasattr(obj, '__iter__') and not isinstance(obj, string_type):
        return list((_json_convert(v) for v in obj))
    try:
        return default(obj)
    except TypeError:
        return obj
def aggregate_post_types():
    """
    Count posts grouped by type.
    :return: list of dicts, each containing a type name and its count
    """
    pipeline = [
        {"$group": {"_id": "$type", "count": {"$sum": 1}}},
        {"$sort": SON([("count", -1), ("_id", -1)])}
    ]
    return posts.aggregate(pipeline)
def _json_convert(obj, json_options=DEFAULT_JSON_OPTIONS):
    """Recursive helper method that converts BSON types so they can be
    converted into json.
    """
    if hasattr(obj, 'iteritems') or hasattr(obj, 'items'):  # PY3 support
        return SON(((k, _json_convert(v, json_options))
                    for k, v in iteritems(obj)))
    elif hasattr(obj, '__iter__') and not isinstance(obj, (text_type, bytes)):
        return list((_json_convert(v, json_options) for v in obj))
    try:
        return default(obj, json_options)
    except TypeError:
        return obj
def model_fields(model, only=None, exclude=None, field_args=None,
                 converter=None):
    """
    Generate a dictionary of fields for a given database model.

    See `model_form` docstring for description of parameters.
    """
    from mongoengine.base import BaseDocument, DocumentMetaclass
    if not isinstance(model, (BaseDocument, DocumentMetaclass)):
        raise TypeError('model must be a mongoengine Document schema')

    converter = converter or ModelConverter()
    field_args = field_args or {}

    names = ((k, v.creation_counter) for k, v in model._fields.items())
    field_names = [n[0] for n in sorted(names, key=lambda n: n[1])]

    if only:
        field_names = [x for x in only if x in set(field_names)]
    elif exclude:
        field_names = [x for x in field_names if x not in set(exclude)]

    field_dict = OrderedDict()
    for name in field_names:
        model_field = model._fields[name]
        field = converter.convert(model, model_field, field_args.get(name))
        if field is not None:
            field_dict[name] = field

    return field_dict
def __get__(self, instance, owner):
    if instance is None:
        return self

    value = instance._data.get(self.name)

    self._auto_dereference = instance._fields[self.name]._auto_dereference
    if self._auto_dereference and isinstance(value, (dict, SON)):
        dereferenced = self.dereference(value)
        if dereferenced is None:
            raise DoesNotExist('Trying to dereference unknown document %s'
                               % value)
        else:
            instance._data[self.name] = dereferenced

    return super(GenericReferenceField, self).__get__(instance, owner)
def to_mongo(self, document):
    if document is None:
        return None

    if isinstance(document, (dict, SON, ObjectId, DBRef)):
        return document

    id_field_name = document.__class__._meta['id_field']
    id_field = document.__class__._fields[id_field_name]

    if isinstance(document, Document):
        # We need the id from the saved object to create the DBRef
        id_ = document.id
        if id_ is None:
            self.error('You can only reference documents once they have'
                       ' been saved to the database')
    else:
        id_ = document

    id_ = id_field.to_mongo(id_)
    collection = document._get_collection_name()
    ref = DBRef(collection, id_)
    return SON((
        ('_cls', document._class_name),
        ('_ref', ref)
    ))
def search_text(self, text, language=None):
    """
    Start a text search, using text indexes.
    Requires MongoDB server version 2.6+.

    :param language: The language that determines the list of stop words
        for the search and the rules for the stemmer and tokenizer.
        If not specified, the search uses the default language of the index.
        For supported languages, see
        `Text Search Languages <http://docs.mongodb.org/manual/reference/text-search-languages/#text-search-languages>`.
    """
    queryset = self.clone()
    if queryset._search_text:
        raise OperationError(
            'It is not possible to use search_text two times.')

    query_kwargs = SON({'$search': text})
    if language:
        query_kwargs['$language'] = language
    queryset._query_obj &= Q(__raw__={'$text': query_kwargs})

    queryset._mongo_query = None
    queryset._cursor_obj = None
    queryset._search_text = text

    return queryset
def to_mongo(self, value):
    if isinstance(value, dict):
        return value
    return SON([('type', self._type), ('coordinates', value)])
def default(obj):
    # We preserve key order when rendering SON, DBRef, etc. as JSON by
    # returning a SON for those types instead of a dict.
    if isinstance(obj, ObjectId):
        return {"$oid": str(obj)}
    if isinstance(obj, DBRef):
        return _json_convert(obj.as_doc())
    if isinstance(obj, datetime.datetime):
        # TODO share this code w/ bson.py?
        if obj.utcoffset() is not None:
            obj = obj - obj.utcoffset()
        millis = int(calendar.timegm(obj.timetuple()) * 1000 +
                     obj.microsecond / 1000)
        return {"$date": millis}
    if isinstance(obj, (RE_TYPE, Regex)):
        flags = ""
        if obj.flags & re.IGNORECASE:
            flags += "i"
        if obj.flags & re.LOCALE:
            flags += "l"
        if obj.flags & re.MULTILINE:
            flags += "m"
        if obj.flags & re.DOTALL:
            flags += "s"
        if obj.flags & re.UNICODE:
            flags += "u"
        if obj.flags & re.VERBOSE:
            flags += "x"
        if isinstance(obj.pattern, text_type):
            pattern = obj.pattern
        else:
            pattern = obj.pattern.decode('utf-8')
        return SON([("$regex", pattern), ("$options", flags)])
    if isinstance(obj, MinKey):
        return {"$minKey": 1}
    if isinstance(obj, MaxKey):
        return {"$maxKey": 1}
    if isinstance(obj, Timestamp):
        return {"$timestamp": SON([("t", obj.time), ("i", obj.inc)])}
    if isinstance(obj, Code):
        return SON([('$code', str(obj)), ('$scope', obj.scope)])
    if isinstance(obj, Binary):
        return SON([
            ('$binary', base64.b64encode(obj).decode()),
            ('$type', "%02x" % obj.subtype)])
    if PY3 and isinstance(obj, bytes):
        return SON([
            ('$binary', base64.b64encode(obj).decode()),
            ('$type', "00")])
    if isinstance(obj, uuid.UUID):
        return {"$uuid": obj.hex}
    raise TypeError("%r is not JSON serializable" % obj)
def default(obj):
    # We preserve key order when rendering SON, DBRef, etc. as JSON by
    # returning a SON for those types instead of a dict. This works with
    # the "json" standard library in Python 2.6+ and with simplejson
    # 2.1.0+ in Python 2.5+, because those libraries iterate the SON
    # using PyIter_Next. Python 2.4 must use simplejson 2.0.9 or older,
    # and those versions of simplejson use the lower-level PyDict_Next,
    # which bypasses SON's order-preserving iteration, so we lose key
    # order in Python 2.4.
    if isinstance(obj, ObjectId):
        return {"$oid": str(obj)}
    if isinstance(obj, DBRef):
        return _json_convert(obj.as_doc())
    if isinstance(obj, datetime.datetime):
        # TODO share this code w/ bson.py?
        if obj.utcoffset() is not None:
            obj = obj - obj.utcoffset()
        millis = int(calendar.timegm(obj.timetuple()) * 1000 +
                     obj.microsecond / 1000)
        return {"$date": millis}
    if isinstance(obj, (RE_TYPE, Regex)):
        flags = ""
        if obj.flags & re.IGNORECASE:
            flags += "i"
        if obj.flags & re.LOCALE:
            flags += "l"
        if obj.flags & re.MULTILINE:
            flags += "m"
        if obj.flags & re.DOTALL:
            flags += "s"
        if obj.flags & re.UNICODE:
            flags += "u"
        if obj.flags & re.VERBOSE:
            flags += "x"
        if isinstance(obj.pattern, unicode):
            pattern = obj.pattern
        else:
            pattern = obj.pattern.decode('utf-8')
        return SON([("$regex", pattern), ("$options", flags)])
    if isinstance(obj, MinKey):
        return {"$minKey": 1}
    if isinstance(obj, MaxKey):
        return {"$maxKey": 1}
    if isinstance(obj, Timestamp):
        return SON([("t", obj.time), ("i", obj.inc)])
    if isinstance(obj, Code):
        return SON([('$code', str(obj)), ('$scope', obj.scope)])
    if isinstance(obj, Binary):
        return SON([
            ('$binary', base64.b64encode(obj).decode()),
            ('$type', "%02x" % obj.subtype)])
    if PY3 and isinstance(obj, binary_type):
        return SON([
            ('$binary', base64.b64encode(obj).decode()),
            ('$type', "00")])
    if bson.has_uuid() and isinstance(obj, bson.uuid.UUID):
        return {"$uuid": obj.hex}
    raise TypeError("%r is not JSON serializable" % obj)
def _find_references(self, items, depth=0):
    """
    Recursively finds all db references to be dereferenced

    :param items: The iterable (dict, list, queryset)
    :param depth: The current depth of recursion
    """
    reference_map = {}
    if not items or depth >= self.max_depth:
        return reference_map

    # Determine the iterator to use
    if isinstance(items, dict):
        iterator = list(items.values())
    else:
        iterator = items

    # Recursively find dbreferences
    depth += 1
    for item in iterator:
        if isinstance(item, (Document, EmbeddedDocument)):
            for field_name, field in item._fields.items():
                v = item._data.get(field_name, None)
                if isinstance(v, DBRef):
                    reference_map.setdefault(field.document_type, set()).add(v.id)
                elif isinstance(v, (dict, SON)) and '_ref' in v:
                    reference_map.setdefault(get_document(v['_cls']), set()).add(v['_ref'].id)
                elif isinstance(v, (dict, list, tuple)) and depth <= self.max_depth:
                    field_cls = getattr(getattr(field, 'field', None), 'document_type', None)
                    references = self._find_references(v, depth)
                    for key, refs in references.items():
                        if isinstance(field_cls, (Document, TopLevelDocumentMetaclass)):
                            key = field_cls
                        reference_map.setdefault(key, set()).update(refs)
        elif isinstance(item, DBRef):
            reference_map.setdefault(item.collection, set()).add(item.id)
        elif isinstance(item, (dict, SON)) and '_ref' in item:
            reference_map.setdefault(get_document(item['_cls']), set()).add(item['_ref'].id)
        elif isinstance(item, (dict, list, tuple)) and depth - 1 <= self.max_depth:
            references = self._find_references(item, depth - 1)
            for key, refs in references.items():
                reference_map.setdefault(key, set()).update(refs)

    return reference_map
def get_client_ratio(request):
    """
    Aggregate counts grouped by client and return the per-client share for the chart.
    :param request:
    :return:
    """
    success, msg, data = False, '', []
    post_data = json_loads(request.body)
    model_cls, filter_dict = parse_ratio_post_data(post_data)

    pipeline = [
        {
            "$group": {
                "_id": "$client_id",
                "count": {"$sum": "$count"}
            },
        },
        {
            "$sort": SON([("count", -1), ("_id", -1)])
        }
    ]

    count_list = model_cls.objects(**filter_dict).aggregate(*pipeline)
    count_list = list(count_list)
    client_id_list = [t['_id'] for t in count_list]
    clients = Client.objects.filter(id__in=client_id_list).values('name', 'id')
    client_dict = {}
    for t in clients:
        client_dict[t['id']] = t['name']

    legend = []
    y_data = []

    # Drop aggregated entries whose client id no longer exists
    new_count_list = []
    for t in count_list:
        if t['_id'] in client_dict:
            new_count_list.append(t)
    count_list = new_count_list

    count_list = sorted(count_list, key=lambda x: x['count'], reverse=True)
    count_list = count_list[:ECHARTS_PIPE_PLOT_MAX_NUM]

    for t in count_list:
        name = client_dict.get(t['_id'])
        if name:
            legend.append(name)
            y_data.append({'value': t['count'], 'name': name})

    data = {
        'legend': legend,
        'y_data': y_data
    }

    return http_response_json({'success': True, 'msg': msg, 'data': data})
def get_endpoint_ratio(request):
    """
    Aggregate counts grouped by endpoint and return the per-endpoint share for the chart.
    :param request:
    :return:
    """
    success, msg, data = False, '', []
    post_data = json_loads(request.body)
    model_cls, filter_dict = parse_ratio_post_data(post_data)

    pipeline = [
        {
            "$group": {
                "_id": "$endpoint_id",
                "count": {"$sum": "$count"}
            },
        },
        {
            "$sort": SON([("count", -1), ("_id", -1)])
        }
    ]

    count_list = model_cls.objects(**filter_dict).aggregate(*pipeline)
    count_list = list(count_list)
    endpoint_id_list = [t['_id'] for t in count_list]
    endpoints = Endpoint.objects.filter(id__in=endpoint_id_list).values('unique_name', 'id')
    endpoint_dict = {}
    for t in endpoints:
        endpoint_dict[t['id']] = t['unique_name']

    legend = []
    y_data = []

    # Drop aggregated entries whose endpoint id no longer exists
    new_count_list = []
    for t in count_list:
        if t['_id'] in endpoint_dict:
            new_count_list.append(t)
    count_list = new_count_list

    # Sort by count (descending) and keep only the top entries
    count_list = sorted(count_list, key=lambda x: x['count'], reverse=True)
    count_list = count_list[:ECHARTS_PIPE_PLOT_MAX_NUM]

    for t in count_list:
        name = endpoint_dict.get(t['_id'])
        if name:
            legend.append(name)
            y_data.append({'value': t['count'], 'name': name})

    data = {
        'legend': legend,
        'y_data': y_data
    }

    return http_response_json({'success': True, 'msg': msg, 'data': data})