Python google.protobuf.text_format module: PrintMessage() example source code
The following code examples, extracted from open-source Python projects, illustrate how to use google.protobuf.text_format.PrintMessage().
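Before the project examples, a minimal sketch of the basic call: PrintMessage(message, out) writes the protobuf text-format representation of message to any file-like object with a write() method. The addressbook_pb2.Person message below is a hypothetical compiled proto used only for illustration.

import io
from google.protobuf import text_format
import addressbook_pb2  # hypothetical compiled proto module, for illustration only

person = addressbook_pb2.Person(name='Alice', id=123)
buf = io.StringIO()
text_format.PrintMessage(person, buf)  # serialize to text format
print(buf.getvalue())
# name: "Alice"
# id: 123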
def testPrintMessageSetByFieldNumber(self):
  out = text_format.TextWriter(False)
  message = unittest_mset_pb2.TestMessageSetContainer()
  ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
  ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
  message.message_set.Extensions[ext1].i = 23
  message.message_set.Extensions[ext2].str = 'foo'
  # With use_field_number=True, fields are printed by numeric tag, not name.
  text_format.PrintMessage(message, out, use_field_number=True)
  self.CompareToGoldenText(out.getvalue(), '1 {\n'
                           '  1545008 {\n'
                           '    15: 23\n'
                           '  }\n'
                           '  1547769 {\n'
                           '    25: \"foo\"\n'
                           '  }\n'
                           '}\n')
  out.close()
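The golden text above is hard to read because MessageSet extensions print as nested numeric blocks. The effect of use_field_number=True is easier to see on a flat message; a minimal sketch, again using the hypothetical addressbook_pb2.Person with name as field 1 and id as field 2:

import io
from google.protobuf import text_format
import addressbook_pb2  # hypothetical compiled proto module

person = addressbook_pb2.Person(name='Alice', id=123)
buf = io.StringIO()
text_format.PrintMessage(person, buf, use_field_number=True)
print(buf.getvalue())
# 1: "Alice"
# 2: 123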
def change_model(proto, dimensions, layers=None):
  # dimensions: the new size to assign to each named layer.
  model = util.ReadModel(proto)
  if layers is None:
    layers = ['image_hidden1', 'image_hidden2', 'image_hidden3',
              'text_hidden1', 'text_hidden2', 'text_hidden3',
              'image_layer', 'text_layer', 'joint_layer',
              'image_tied_hidden', 'text_tied_hidden',
              'image_hidden2_recon', 'text_hidden2_recon',
              'cross_image_hidden2_recon', 'cross_text_hidden2_recon']
  for layer in layers:
    try:
      layer_proto = next(lay for lay in model.layer if lay.name == layer)
      layer_proto.dimensions = dimensions
    except StopIteration:
      pass
  # Write the modified model back out in text format.
  with open(proto, 'w') as f:
    text_format.PrintMessage(model, f)
def change_data(proto, dimensions, datas=None):
  # dimensions: the new first dimension to assign to each named dataset.
  proto_cont = util.ReadData(proto)
  if datas is None:
    datas = []
    for m in ['image', 'text']:
      for i in [1, 2, 3]:
        for t in ['train', 'validation', 'test']:
          datas += [m + '_hidden' + str(i) + '_' + t]
          datas += ['bae_' + m + '_hidden' + str(i) + '_' + t]
          datas += ['bae_' + m + '_hidden' + str(i) + '_' + t + '_all']
          datas += ['corr_' + m + '_hidden' + str(i) + '_' + t]
  for data in datas:
    try:
      data_proto = next(lay for lay in proto_cont.data if lay.name == data)
      data_proto.dimensions[0] = dimensions
    except StopIteration:
      pass
  with open(proto, 'w') as f:
    text_format.PrintMessage(proto_cont, f)
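change_model() and change_data() above both follow the same read-modify-write pattern: parse a text-format file into a message with text_format.Merge(), edit fields in place, then overwrite the file with PrintMessage(). A standalone sketch of the pattern, assuming a hypothetical config_pb2.Config message with a num_layers field:

from google.protobuf import text_format
import config_pb2  # hypothetical compiled proto module

config = config_pb2.Config()
with open('config.pbtxt', 'r') as f:
  text_format.Merge(f.read(), config)  # parse text format into the message
config.num_layers = 3                  # edit fields in place
with open('config.pbtxt', 'w') as f:
  text_format.PrintMessage(config, f)  # write the updated message back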
def start(self, rank):
    self.rank = rank
    if len(self.gpus) > 0:
        self.device = self.gpus[rank]
        if debug:
            s = 'solver gpu %d' % self.gpus[self.rank] + \
                ' pid %d' % os.getpid() + ' size %d' % self.size + \
                ' rank %d' % self.rank
            print(s, file=sys.stderr)
        caffe.set_mode_gpu()
        caffe.set_device(self.device)
        caffe.set_solver_count(self.size)
        caffe.set_solver_rank(self.rank)
        caffe.set_multiprocess(True)
    else:
        print('solver cpu', file=sys.stderr)
        caffe.set_mode_cpu()

    if self.cmd.graph.endswith('.json'):
        with open(self.cmd.graph, mode='r') as f:
            graph = caffe_pb2.SolverParameter()
            text_format.Merge(f.read(), graph)
            self.graph = graph
    else:
        self.graph = self.solver_graph()

    # Dump the solver definition to a temporary file so Caffe can read it.
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
        text_format.PrintMessage(self.graph, f)
        tmp = f.name
    self.caffe = caffe.AdamSolver(tmp)

    if self.uid:
        self.nccl = caffe.NCCL(self.caffe, self.uid)
        self.nccl.bcast()
        self.caffe.add_callback(self.nccl)
        if self.caffe.param.layer_wise_reduce:
            self.caffe.net.after_backward(self.nccl)
def EditTrainers(data_dir, model_dir, rep_dir, numsplits):
  tnames = ['train_CD_image_layer1.pbtxt',
            'train_CD_image_layer2.pbtxt',
            'train_CD_text_layer1.pbtxt',
            'train_CD_text_layer2.pbtxt',
            'train_CD_joint_layer.pbtxt']
  for tname in tnames:
    t_op_file = os.path.join('trainers', 'dbn', tname)
    t_op = util.ReadOperation(t_op_file)
    if 'layer1' in tname:
      t_op.data_proto_prefix = data_dir
    else:
      t_op.data_proto_prefix = rep_dir
    t_op.checkpoint_directory = model_dir
    with open(t_op_file, 'w') as f:
      text_format.PrintMessage(t_op, f)

  t_op_file = os.path.join('trainers', 'classifiers', 'baseclassifier.pbtxt')
  t_op = util.ReadOperation(t_op_file)
  for i in range(1, numsplits + 1):
    t_op_file = os.path.join('trainers', 'classifiers', 'split_%d.pbtxt' % i)
    t_op.data_proto_prefix = rep_dir
    t_op.data_proto = os.path.join('split_%d' % i, 'data.pbtxt')
    t_op.checkpoint_prefix = model_dir
    t_op.checkpoint_directory = os.path.join('classifiers', 'split_%d' % i)
    with open(t_op_file, 'w') as f:
      text_format.PrintMessage(t_op, f)

  # Change prefix in multimodal dbn model.
  mnames = ['multimodal_dbn.pbtxt']
  for mname in mnames:
    model_file = os.path.join('models', mname)
    model = util.ReadModel(model_file)
    model.prefix = model_dir
    with open(model_file, 'w') as f:
      text_format.PrintMessage(model, f)
def main():
  data_dir = sys.argv[1]
  model_dir = sys.argv[2]
  rep_dir = sys.argv[3]
  gpu_mem = sys.argv[4]
  main_mem = sys.argv[5]
  numsplits = int(sys.argv[6])

  data_pbtxt_file = os.path.join(data_dir, 'flickr.pbtxt')
  data_pb = util.ReadData(data_pbtxt_file)
  EditPaths(data_pb, data_dir, gpu_mem, main_mem)
  with open(data_pbtxt_file, 'w') as f:
    text_format.PrintMessage(data_pb, f)

  EditTrainers(data_dir, model_dir, rep_dir, numsplits)

  data_pbtxt_file_z = os.path.join(data_dir, 'flickr_z.pbtxt')
  data_pbtxt_file_nnz = os.path.join(data_dir, 'flickr_nnz.pbtxt')
  if not os.path.exists(data_pbtxt_file_z):
    CreateMissingTextData(data_pb, data_pbtxt_file_z, data_pbtxt_file_nnz)
  data_pb = util.ReadData(data_pbtxt_file_z)
  EditPaths(data_pb, data_dir, gpu_mem, main_mem)
  with open(data_pbtxt_file_z, 'w') as f:
    text_format.PrintMessage(data_pb, f)
  data_pb = util.ReadData(data_pbtxt_file_nnz)
  EditPaths(data_pb, data_dir, gpu_mem, main_mem)
  with open(data_pbtxt_file_nnz, 'w') as f:
    text_format.PrintMessage(data_pb, f)
def WritePbtxt(output_file, pb):
  with open(output_file, 'w') as f:
    text_format.PrintMessage(pb, f)
def main():
  data_pbtxt = sys.argv[1]
  output_dir = sys.argv[2]
  prefix = sys.argv[3]
  r = int(sys.argv[4])
  gpu_mem = sys.argv[5]
  main_mem = sys.argv[6]

  if not os.path.isdir(output_dir):
    os.makedirs(output_dir)
  rep_dict, stats_files = MakeDict(data_pbtxt)
  reps = rep_dict.keys()

  indices_file = os.path.join(prefix, 'splits', 'train_indices_%d.npy' % r)
  if os.path.exists(indices_file):
    train = np.load(indices_file)
    valid = np.load(os.path.join(prefix, 'splits', 'valid_indices_%d.npy' % r))
    test = np.load(os.path.join(prefix, 'splits', 'test_indices_%d.npy' % r))
  else:
    print('Creating new split.')
    indices = np.arange(25000)
    np.random.shuffle(indices)
    train = indices[:10000]
    valid = indices[10000:15000]
    test = indices[15000:]
    np.save(os.path.join(prefix, 'splits', 'train_indices_%d.npy' % r), train)
    np.save(os.path.join(prefix, 'splits', 'valid_indices_%d.npy' % r), valid)
    np.save(os.path.join(prefix, 'splits', 'test_indices_%d.npy' % r), test)

  print('Splitting data')
  dataset_pb = deepnet_pb2.Dataset()
  dataset_pb.name = 'flickr_split_%d' % r
  dataset_pb.gpu_memory = gpu_mem
  dataset_pb.main_memory = main_mem
  for rep in reps:
    data = rep_dict[rep]
    stats_file = stats_files[rep]
    DumpDataSplit(data[train], output_dir, 'train_%s' % rep, dataset_pb, stats_file)
    DumpDataSplit(data[valid], output_dir, 'valid_%s' % rep, dataset_pb, stats_file)
    DumpDataSplit(data[test], output_dir, 'test_%s' % rep, dataset_pb, stats_file)

  print('Splitting labels')
  labels = np.load(os.path.join(prefix, 'labels.npy')).astype('float32')
  DumpLabelSplit(labels[train], output_dir, 'train_labels', dataset_pb)
  DumpLabelSplit(labels[valid], output_dir, 'valid_labels', dataset_pb)
  DumpLabelSplit(labels[test], output_dir, 'test_labels', dataset_pb)
  #d = 'indices'
  #np.save(os.path.join(output_dir, 'train_%s.npy' % d), train)
  #np.save(os.path.join(output_dir, 'valid_%s.npy' % d), valid)
  #np.save(os.path.join(output_dir, 'test_%s.npy' % d), test)
  with open(os.path.join(output_dir, 'data.pbtxt'), 'w') as f:
    text_format.PrintMessage(dataset_pb, f)
  print('Output written in directory %s' % output_dir)