The following 49 code examples, extracted from open-source Python projects, illustrate how to use builtins.bytes().
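Before the project examples, here is a minimal illustrative sketch (not taken from any of the projects below) of the most common bytes() call patterns: encoding a str with an explicit encoding, wrapping an iterable of integers, allocating a zero-filled buffer, and copying a bytes-like object.

# Minimal illustration of builtins.bytes() usage (not from the projects below).

# Encode a str to bytes with an explicit encoding.
encoded = bytes('héllo', 'utf-8')
assert encoded == b'h\xc3\xa9llo'

# Wrap an iterable of integers (each 0..255) into an immutable byte string.
from_ints = bytes([0x69, 0xb7])
assert from_ints == b'i\xb7'

# An integer argument creates a zero-filled buffer of that length.
zeros = bytes(4)
assert zeros == b'\x00\x00\x00\x00'

# bytes() of an existing bytes-like object makes an immutable copy.
copied = bytes(bytearray(b'abc'))
assert copied == b'abc'
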
def test_ab64_encode(self):
    """ab64_encode()"""
    from passlib.utils.binary import ab64_encode

    # accept bytes
    self.assertEqual(ab64_encode(hb("69b7")), b"abc")

    # reject unicode
    self.assertRaises(TypeError if PY3 else UnicodeEncodeError,
                      ab64_encode, hb("69b7").decode("latin-1"))

    # insert correct padding before decoding
    self.assertEqual(ab64_encode(hb("69b71d")), b"abcd")       # 0 mod 4
    self.assertEqual(ab64_encode(hb("69b71d79")), b"abcdeQ")   # 2 mod 4
    self.assertEqual(ab64_encode(hb("69b71d79f8")), b"abcdefg")  # 3 mod 4

    # output "./" altchars
    self.assertEqual(ab64_encode(hb("69bfbf")), b"ab./")

def test_b64s_decode(self):
    """b64s_decode()"""
    from passlib.utils.binary import b64s_decode

    # accept bytes or unicode
    self.assertEqual(b64s_decode(b"abc"), hb("69b7"))
    self.assertEqual(b64s_decode(u("abc")), hb("69b7"))

    # reject non-ascii unicode
    self.assertRaises(ValueError, b64s_decode, u("ab\xff"))

    # underlying a2b_ascii treats non-base64 chars as "Incorrect padding"
    self.assertRaises(TypeError, b64s_decode, b"ab\xff")
    self.assertRaises(TypeError, b64s_decode, b"ab!")
    self.assertRaises(TypeError, b64s_decode, u("ab!"))

    # insert correct padding, handle dirty padding bits
    self.assertEqual(b64s_decode(b"abcd"), hb("69b71d"))        # 0 mod 4
    self.assertRaises(ValueError, b64s_decode, b"abcde")        # 1 mod 4
    self.assertEqual(b64s_decode(b"abcdef"), hb("69b71d79"))    # 2 mod 4, dirty padding bits
    self.assertEqual(b64s_decode(b"abcdeQ"), hb("69b71d79"))    # 2 mod 4, clean padding bits
    self.assertEqual(b64s_decode(b"abcdefg"), hb("69b71d79f8"))  # 3 mod 4, clean padding bits

def test_b64s_encode(self):
    """b64s_encode()"""
    from passlib.utils.binary import b64s_encode

    # accept bytes
    self.assertEqual(b64s_encode(hb("69b7")), b"abc")

    # reject unicode
    self.assertRaises(TypeError if PY3 else UnicodeEncodeError,
                      b64s_encode, hb("69b7").decode("latin-1"))

    # insert correct padding before decoding
    self.assertEqual(b64s_encode(hb("69b71d")), b"abcd")        # 0 mod 4
    self.assertEqual(b64s_encode(hb("69b71d79")), b"abcdeQ")    # 2 mod 4
    self.assertEqual(b64s_encode(hb("69b71d79f8")), b"abcdefg")  # 3 mod 4

    # output "+/" altchars
    self.assertEqual(b64s_encode(hb("69bfbf")), b"ab+/")

def _extract_header(nsis_file, firstheader):
    inflated_data, data_size = inflate_header(nsis_file, firstheader.data_offset)
    header = Header._make(_header_pack.unpack_from(inflated_data))
    firstheader.header = header
    firstheader._raw_header = bytes(inflated_data)
    firstheader._raw_header_c_size = data_size

    # Parse the block headers.
    block_headers = []
    for i in range(BLOCKS_COUNT):
        header_offset = i * _blockheader_pack.size
        block_header = BlockHeader._make(_blockheader_pack.unpack_from(
            header.raw_blocks[header_offset:]))
        block_headers.append(block_header)
    header.blocks = block_headers

    # Parse the install types.
    header.install_types = [
        struct.unpack_from('<I', header.raw_install_types[i:])[0]
            for i in range(0, len(header.raw_install_types), 4)]

    return header

def no(mytype, argnums=(1,)):
    """
    A shortcut for the disallow_types decorator that disallows only one type
    (in any position in argnums).

    Example use:

    >>> class newstr(object):
    ...     @no('bytes')
    ...     def __add__(self, other):
    ...          pass

    >>> newstr(u'1234') + b'1234'     #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
      ...
    TypeError: argument can't be bytes

    The object can also be passed directly, but passing the string helps
    to prevent circular import problems.
    """
    if isinstance(argnums, Integral):
        argnums = (argnums,)
    disallowed_types = [mytype] * len(argnums)
    return disallow_types(argnums, disallowed_types)

def __init__(self):
    self._protocol = 'auto'
    self.FWD_PROJ_EXPR = {
        'pickle': self._marshal_pickle,
        'json': self._marshal_json,
        'jsongz': partial(self._marshal_json, as_gzip=True),
        'bytes': self._marshal_bytes,
        'bytesgz': partial(self._marshal_bytes, as_gzip=True),
    }
    self.BWD_PROJ_EXPR = {
        'pickle': self._unmarshal_pickle,
        'json': self._unmarshal_json,
        'jsongz': self._unmarshal_json,
        'bytes': self._unmarshal_bytes,
        'bytesgz': self._unmarshal_bytes
    }

def _marshal_bytes(self, obj, as_gzip=False):
    prepad = b''
    try:
        if isinstance(obj, bytes):
            buf = bytes(obj)
            prepad = b'r'
        elif isinstance(obj, str):
            buf = bytes(obj, 'utf-8')
        else:
            raise TypeError()
    except TypeError as e:
        raise TypeError('Object of class <{}> is not serializable by raw '
                        'bytes'.format(type(obj)))

    if as_gzip:
        return prepad + self.BYTESGZ_IDENTIFIER + _to_gzip(buf)
    return prepad + self.BYTES_IDENTIFIER + buf

def shell(self, params, shell=False):
    if 'pm list package' in params:
        if self.apk_not_installed:
            return b''
        if self.target_not_installed and MOCK_MISSING_PACKAGE_NAME in params:
            return b''
        return bytes('package:%s' % MOCK_PACKAGE_NAME, 'utf-8')
    elif 'pm list instrumentation' in params:
        if self.apk_not_instrumented:
            return b''
        if self.target_not_installed:
            return bytes('instrumentation:{p}/{r} (target={mp})'.format(
                p=MOCK_PACKAGE_NAME,
                r=snippet_client._INSTRUMENTATION_RUNNER_PACKAGE,
                mp=MOCK_MISSING_PACKAGE_NAME), 'utf-8')
        return bytes('instrumentation:{p}/{r} (target={p})'.format(
            p=MOCK_PACKAGE_NAME,
            r=snippet_client._INSTRUMENTATION_RUNNER_PACKAGE), 'utf-8')
    elif 'which' in params:
        return b''

def _gen_outfile(self):
    out_dir = getcwd()
    if isdefined(self.inputs.out_dir):
        out_dir = self.inputs.out_dir

    fname_comps = []
    for comp, cpre in list(BIDS_COMP.items()):
        comp_val = None
        if isdefined(getattr(self.inputs, comp)):
            comp_val = getattr(self.inputs, comp)
            if comp_val == "None":
                comp_val = None

        comp_fmt = '{}-{}'.format
        if comp_val is not None:
            if isinstance(comp_val, (bytes, str)) and comp_val.startswith(cpre + '-'):
                comp_val = comp_val.split('-', 1)[-1]
            fname_comps.append(comp_fmt(cpre, comp_val))

    fname_comps.append('%s.json' % self.inputs.modality)
    self._results['out_file'] = op.join(out_dir, '_'.join(fname_comps))
    return self._results['out_file']

def get_data(self, format, pitch):
    '''Get the byte data of the image.

    :Parameters:
        `format` : str
            Format string of the return data.
        `pitch` : int
            Number of bytes per row.  Negative values indicate a
            top-to-bottom arrangement.

    :since: pyglet 1.1

    :rtype: sequence of bytes, or str
    '''
    if format == self._current_format and pitch == self._current_pitch:
        return self._current_data
    return self._convert(format, pitch)

def set_data(self, format, pitch, data):
    '''Set the byte data of the image.

    :Parameters:
        `format` : str
            Format string of the return data.
        `pitch` : int
            Number of bytes per row.  Negative values indicate a
            top-to-bottom arrangement.
        `data` : str or sequence of bytes
            Image data.

    :since: pyglet 1.1
    '''
    self._current_format = format
    self._current_pitch = pitch
    self._current_data = data
    self._current_texture = None
    self._current_mipmapped_texture = None

def set_mipmap_data(self, level, data):
    '''Set data for a mipmap level.

    Supplied data gives a compressed image for the given mipmap level.
    The image must be of the correct dimensions for the level
    (i.e., width >> level, height >> level); but this is not checked.  If
    any mipmap levels are specified, they are used; otherwise, mipmaps for
    `mipmapped_texture` are generated automatically.

    :Parameters:
        `level` : int
            Level of mipmap image to set.
        `data` : sequence
            String or array/list of bytes giving compressed image data.
            Data must be in same format as specified in constructor.
    '''
    # Extend mipmap_data list to required level
    self.mipmap_data += [None] * (level - len(self.mipmap_data))
    self.mipmap_data[level - 1] = data

def parse_strings(data, counter, l):
    i = 0
    error_count = 0
    while i < len(data):
        data_slice = data[i:i + 2]
        if len(data_slice) < 2:
            break

        len_ = struct.unpack("<h", data_slice)[0]
        i += 2
        if len_ != 0 and 0 <= len_*2 <= len(data):
            try:
                l[counter] = bytes(data[i: i + len_ * 2]).decode('utf-16')
            except UnicodeDecodeError:
                error_count += 1
                pass
            if error_count >= 3:
                break
            i += len_ * 2
        counter += 1

def __unpack__(self, data):
    data = bytes(data)

    if len(data) > self.__format_length__:
        data = data[:self.__format_length__]

    # OC Patch:
    # Some malware have incorrect header lengths.
    # Fail gracefully if this occurs
    # Buggy malware: a29b0118af8b7408444df81701ad5a7f
    # elif len(data) < self.__format_length__:
    #     raise PEFormatError('Data length less than expected header length.')

    if count_zeroes(data) == len(data):
        self.__all_zeroes__ = True

    self.__unpacked_data_elms__ = struct.unpack(self.__format__, data)
    for i in range(len(self.__unpacked_data_elms__)):
        for key in self.__keys__[i]:
            setattr(self, key, self.__unpacked_data_elms__[i])

def set_bytes_at_offset(self, offset, data):
    """Overwrite the bytes at the given file offset with the given string.

    Return True if successful, False otherwise. It can fail if the
    offset is outside the file's boundaries.
    """
    if not isinstance(data, bytes):
        raise TypeError('data should be of type: bytes')

    if offset >= 0 and offset < len(self.__data__):
        self.__data__ = (self.__data__[:offset] +
                         data +
                         self.__data__[offset+len(data):])
    else:
        return False

    return True

def adjust_FileAlignment( self, val, file_alignment ):
    global FileAlignment_Warning
    if file_alignment > FILE_ALIGNEMNT_HARDCODED_VALUE:
        # If it's not a power of two, report it:
        if not power_of_two(file_alignment) and FileAlignment_Warning is False:
            self.__warnings.append(
                'If FileAlignment > 0x200 it should be a power of 2. Value: %x' % (
                    file_alignment) )
            FileAlignment_Warning = True

    if file_alignment < FILE_ALIGNEMNT_HARDCODED_VALUE:
        return val
    return (old_div(val, 0x200)) * 0x200

# According to the document:
# [ Microsoft Portable Executable and Common Object File Format Specification ]
# "The alignment (in bytes) of sections when they are loaded into memory. It must be
# greater than or equal to FileAlignment. The default is the page size for the
# architecture."
#

def local_ip():
    """Get the local network IP of this machine"""
    try:
        ip = socket.gethostbyname(socket.gethostname())
    except IOError:
        ip = socket.gethostbyname('localhost')

    if ip.startswith('127.'):
        # Check eth0, eth1, eth2, en0, ...
        interfaces = [
            i + bytes(n) for i in (b'eth', b'en', b'wlan')
            for n in range(3)
        ]  # :(
        for interface in interfaces:
            try:
                ip = interface_ip(interface)
                break
            except IOError:
                pass

    return ip

def create_shader(self, strings, shader_type):
    count = len(strings)
    # if we have no source code, ignore this shader
    if count < 1:
        return

    # create the shader handle
    shader = glCreateShader(shader_type)

    shaderstrings = []
    for string in strings:
        shaderstrings.append(bytes(string, 'ascii'))

    # convert the source strings into a ctypes pointer-to-char array, and upload them.
    # this is deep, dark, dangerous black magic - don't try stuff like this at home!
    src = (c_char_p * count)(*shaderstrings)
    glShaderSource(shader, count, cast(
        pointer(src), POINTER(POINTER(c_char))), None)

    # compile the shader
    glCompileShader(shader)

    temp = c_int(0)
    # retrieve the compile status
    glGetShaderiv(shader, GL_COMPILE_STATUS, byref(temp))

    # if compilation failed, print the log
    if not temp:
        # retrieve the log length
        glGetShaderiv(shader, GL_INFO_LOG_LENGTH, byref(temp))
        # create a buffer for the log
        buffer = create_string_buffer(temp.value)
        # retrieve the log text
        glGetShaderInfoLog(shader, temp, None, buffer)
        # print the log to the console
        print(buffer.value)
    else:
        # all is well, so attach the shader to the program
        glAttachShader(self.handle, shader)

def test_getrandbytes(self):
    """getrandbytes()"""
    from passlib.utils import getrandbytes
    wrapper = partial(getrandbytes, self.getRandom())
    self.assertEqual(len(wrapper(0)), 0)
    a = wrapper(10)
    b = wrapper(10)
    self.assertIsInstance(a, bytes)
    self.assertEqual(len(a), 10)
    self.assertEqual(len(b), 10)
    self.assertNotEqual(a, b)

def test_getrandstr(self, seed):
    """getrandstr()"""
    from passlib.utils import getrandstr
    wrapper = partial(getrandstr, self.getRandom(seed=seed))

    # count 0
    self.assertEqual(wrapper('abc', 0), '')

    # count <0
    self.assertRaises(ValueError, wrapper, 'abc', -1)

    # letters 0
    self.assertRaises(ValueError, wrapper, '', 0)

    # letters 1
    self.assertEqual(wrapper('a', 5), 'aaaaa')

    # NOTE: the following parts are non-deterministic,
    #       with a small chance of failure (outside chance it may pick
    #       a string w/o one char, even more remote chance of picking
    #       same string). to combat this, we run it against multiple
    #       fixed seeds (using run_with_fixed_seeds decorator),
    #       and hope that they're sufficient to test the range of behavior.

    # letters
    x = wrapper(u('abc'), 32)
    y = wrapper(u('abc'), 32)
    self.assertIsInstance(x, unicode)
    self.assertNotEqual(x, y)
    self.assertEqual(sorted(set(x)), [u('a'), u('b'), u('c')])

    # bytes
    x = wrapper(b'abc', 32)
    y = wrapper(b'abc', 32)
    self.assertIsInstance(x, bytes)
    self.assertNotEqual(x, y)
    # NOTE: decoding this due to py3 bytes
    self.assertEqual(sorted(set(x.decode("ascii"))), [u('a'), u('b'), u('c')])

def test_bytes(self):
    """test b() helper, bytes and native str type"""
    if PY3:
        import builtins
        self.assertIs(bytes, builtins.bytes)
    else:
        import __builtin__ as builtins
        self.assertIs(bytes, builtins.str)

    self.assertIsInstance(b'', bytes)
    self.assertIsInstance(b'\x00\xff', bytes)
    if PY3:
        self.assertEqual(b'\x00\xff'.decode("latin-1"), "\x00\xff")
    else:
        self.assertEqual(b'\x00\xff', "\x00\xff")

def test_to_bytes(self):
    """test to_bytes()"""
    from passlib.utils import to_bytes

    # check unicode inputs
    self.assertEqual(to_bytes(u('abc')), b'abc')
    self.assertEqual(to_bytes(u('\x00\xff')), b'\x00\xc3\xbf')

    # check unicode w/ encodings
    self.assertEqual(to_bytes(u('\x00\xff'), 'latin-1'), b'\x00\xff')
    self.assertRaises(ValueError, to_bytes, u('\x00\xff'), 'ascii')

    # check bytes inputs
    self.assertEqual(to_bytes(b'abc'), b'abc')
    self.assertEqual(to_bytes(b'\x00\xff'), b'\x00\xff')
    self.assertEqual(to_bytes(b'\x00\xc3\xbf'), b'\x00\xc3\xbf')

    # check bytes input ignores encoding
    self.assertEqual(to_bytes(b'\x00\xc3\xbf', "latin-1"), b'\x00\xc3\xbf')

    # check bytes transcoding
    self.assertEqual(to_bytes(b'\x00\xc3\xbf', "latin-1", "", "utf-8"), b'\x00\xff')

    # check other
    self.assertRaises(AssertionError, to_bytes, 'abc', None)
    self.assertRaises(TypeError, to_bytes, None)

def test_ab64_decode(self):
    """ab64_decode()"""
    from passlib.utils.binary import ab64_decode

    # accept bytes or unicode
    self.assertEqual(ab64_decode(b"abc"), hb("69b7"))
    self.assertEqual(ab64_decode(u("abc")), hb("69b7"))

    # reject non-ascii unicode
    self.assertRaises(ValueError, ab64_decode, u("ab\xff"))

    # underlying a2b_ascii treats non-base64 chars as "Incorrect padding"
    self.assertRaises(TypeError, ab64_decode, b"ab\xff")
    self.assertRaises(TypeError, ab64_decode, b"ab!")
    self.assertRaises(TypeError, ab64_decode, u("ab!"))

    # insert correct padding, handle dirty padding bits
    self.assertEqual(ab64_decode(b"abcd"), hb("69b71d"))        # 0 mod 4
    self.assertRaises(ValueError, ab64_decode, b"abcde")        # 1 mod 4
    self.assertEqual(ab64_decode(b"abcdef"), hb("69b71d79"))    # 2 mod 4, dirty padding bits
    self.assertEqual(ab64_decode(b"abcdeQ"), hb("69b71d79"))    # 2 mod 4, clean padding bits
    self.assertEqual(ab64_decode(b"abcdefg"), hb("69b71d79f8"))  # 3 mod 4, clean padding bits

    # support "./" or "+/" altchars
    # (lets us transition to "+/" representation, merge w/ b64s_decode)
    self.assertEqual(ab64_decode(b"ab+/"), hb("69bfbf"))
    self.assertEqual(ab64_decode(b"ab./"), hb("69bfbf"))

def test_decode_bytes_padding(self):
    """test decode_bytes() ignores padding bits"""
    bchr = (lambda v: bytes([v])) if PY3 else chr
    engine = self.engine
    m = self.m
    decode = engine.decode_bytes
    BNULL = b"\x00"

    # length == 2 mod 4: 4 bits of padding
    self.assertEqual(decode(m(0, 0)), BNULL)
    for i in range(0, 6):
        if engine.big:  # 4 lsb padding
            correct = BNULL if i < 4 else bchr(1 << (i - 4))
        else:  # 4 msb padding
            correct = bchr(1 << (i + 6)) if i < 2 else BNULL
        self.assertEqual(decode(m(0, 1 << i)), correct, "%d/4 bits:" % i)

    # length == 3 mod 4: 2 bits of padding
    self.assertEqual(decode(m(0, 0, 0)), BNULL * 2)
    for i in range(0, 6):
        if engine.big:  # 2 lsb are padding
            correct = BNULL if i < 2 else bchr(1 << (i - 2))
        else:  # 2 msb are padding
            correct = bchr(1 << (i + 4)) if i < 4 else BNULL
        self.assertEqual(decode(m(0, 0, 1 << i)), BNULL + correct, "%d/2 bits:" % i)

def bascii_to_str(s):
    assert isinstance(s, bytes)
    return s.decode("ascii")

def iter_byte_values(s):
    assert isinstance(s, bytes)
    return s

def iter_byte_chars(s):
    assert isinstance(s, bytes)
    # FIXME: there has to be a better way to do this
    return (bytes([c]) for c in s)

def bascii_to_str(s):
    assert isinstance(s, bytes)
    return s

def iter_byte_chars(s):
    assert isinstance(s, bytes)
    return s

def hash_bytes(input_string):
    """Return a hash bytestring.

    Necessary to have consistent behavior between Python 2 & 3.
    """
    if sys.version_info[0] < 3:
        return bytes(input_string, 'utf-8')
    return input_string.encode()

def _is_lzma(data):
    def _is_lzma_header(data):
        return data[0:3] == bytes([0x5d, 0, 0]) \
            and data[5] == 0 \
            and (data[6] & 0x80 == 0)
    return (_is_lzma_header(data) or
            (data[0] <= 1 and _is_lzma_header(data[1:])))

def _bzip2(f, size):
    from nrs.ext import bzlib
    data = f.read(size)
    return bytes(bzlib.decompress(data))

def _lzma(f, size):
    from nrs.ext import lzma
    data = f.read()
    return bytes(lzma.decompress(data))

def inflate_header(nsis_file, data_offset):
    nsis_file.seek(data_offset)
    chunk = bytes(nsis_file.read(0xc))
    data_size = struct.unpack_from('<I', chunk)[0]
    solid = True
    decoder = None

    if _is_lzma(chunk):
        decoder = _lzma
    elif chunk[3] == 0x80:
        solid = False
        if _is_lzma(chunk[4:]):
            decoder = _lzma
        elif _is_bzip2(chunk[4:]):
            decoder = _bzip2
        else:
            decoder = _zlib
    elif _is_bzip2(chunk):
        decoder = _bzip2
    else:
        decoder = _zlib

    if solid:
        deflated_data = nsis_file.seek(data_offset)
    else:
        nsis_file.seek(data_offset+4)
        data_size &= 0x7fffffff

    inflated_data = decoder(nsis_file, data_size)
    if solid:
        data_size, = struct.unpack_from('<I', inflated_data)
        inflated_data = inflated_data[4:data_size+4]

    return inflated_data, data_size

def to_bytes(self): r""" >>> Int8TextRecord(42).to_bytes() b'\x88*' """ return bytes(super(Int8TextRecord, self).to_bytes() + struct.pack(b'<b', self.value))
def to_bytes(self): r""" >>> Int16TextRecord(1337).to_bytes() b'\x8a9\x05' """ return bytes(struct.pack(b'<B', self.type) + struct.pack(b'<h', self.value))