We extracted the following 43 code examples from open-source Python projects to illustrate how to use networkx.topological_sort().
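Before the extracted examples, a minimal sketch of the call itself may help (a hand-built three-node DiGraph; note that under NetworkX 2.x topological_sort() returns a generator, while many of the examples below use the older 1.x API that returned a list):

import networkx as nx

# Build a small DAG: "a" must come before "b", and "b" before "c".
g = nx.DiGraph()
g.add_edges_from([("a", "b"), ("b", "c")])

# In NetworkX 2.x topological_sort() returns a generator,
# so materialize it with list() before indexing or reusing it.
order = list(nx.topological_sort(g))
print(order)  # ['a', 'b', 'c']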
def order_build(graph):
    '''
    Assumes that packages are in graph.
    Builds a temporary graph of relevant nodes and returns its topological sort.
    Relevant nodes are selected in a breadth-first traversal sourced at each pkg in packages.
    '''
    reorder_cyclical_test_dependencies(graph)
    try:
        order = nx.topological_sort(graph, reverse=True)
    except nx.exception.NetworkXUnfeasible:
        # Use % interpolation; the original passed the cycle as a second
        # ValueError argument, so %s was never filled in.
        raise ValueError("Cycles detected in graph: %s" %
                         nx.find_cycle(graph, orientation='reverse'))
    return order
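This example uses the NetworkX 1.x signature. The reverse keyword was removed in NetworkX 2.0, and topological_sort() now returns a generator; a sketch of the equivalent call under 2.x, reusing the graph argument from order_build above:

import networkx as nx

# NetworkX 2.x equivalent of nx.topological_sort(graph, reverse=True):
# materialize the generator, then reverse the resulting list.
order = list(reversed(list(nx.topological_sort(graph))))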
def _get_calc_nodes(self, name):
    g = nx.DiGraph()
    g.add_nodes_from(self.dag.nodes())
    g.add_edges_from(self.dag.edges())
    for n in nx.ancestors(g, name):
        node = self.dag.node[n]
        state = node[_AN_STATE]
        if state == States.UPTODATE or state == States.PINNED:
            g.remove_node(n)

    ancestors = nx.ancestors(g, name)
    for n in ancestors:
        if state == States.UNINITIALIZED and len(self.dag.pred[n]) == 0:
            raise Exception("Cannot compute {} because {} uninitialized".format(name, n))
        if state == States.PLACEHOLDER:
            raise Exception("Cannot compute {} because {} is placeholder".format(name, n))

    ancestors.add(name)
    nodes_sorted = nx.topological_sort(g)
    return [n for n in nodes_sorted if n in ancestors]
def to_df(self):
    """
    Get a dataframe containing the states and value of all nodes of computation

    ::

        >>> comp = loman.Computation()
        >>> comp.add_node('foo', value=1)
        >>> comp.add_node('bar', value=2)
        >>> comp.to_df()
                       state  value is_expansion
        bar  States.UPTODATE      2          NaN
        foo  States.UPTODATE      1          NaN
    """
    df = pd.DataFrame(index=nx.topological_sort(self.dag))
    df[_AN_STATE] = pd.Series(nx.get_node_attributes(self.dag, _AN_STATE))
    df[_AN_VALUE] = pd.Series(nx.get_node_attributes(self.dag, _AN_VALUE))
    df_timing = pd.DataFrame.from_dict(nx.get_node_attributes(self.dag, 'timing'), orient='index')
    df = pd.merge(df, df_timing, left_index=True, right_index=True, how='left')
    return df
def get_topological_sorted_tasks(self):
    """ Create the tasks Graph and perform topological sorting

        A topological sort is a nonunique permutation of the nodes
        such that an edge from u to v implies that u appears before v
        in the topological sort order.
        :return: Return a list of nodes in topological sort order.
    """
    # First, map the tasks IDs to their original position
    tasks_position = {}
    for count_position, task in enumerate(self.workflow['tasks']):
        tasks_position[task['id']] = count_position

    sorted_tasks_id = nx.topological_sort(self.graph, reverse=False)
    return sorted_tasks_id
def workflow_execution_parcial(self):
    topological_sort = self.get_topological_sorted_tasks()
    for node_obj in topological_sort:
        # print self.workflow_graph.node[node]
        print(nx.ancestors(self.graph, node_obj),
              self.graph.predecessors(node_obj),
              node_obj,
              self.graph.node[node_obj]['in_degree_required'],
              self.graph.node[node_obj]['in_degree'],
              self.graph.node[node_obj]['out_degree_required'],
              self.graph.node[node_obj]['out_degree'])
    return True  # only to debug
def get_nodes(self, name, next_layer=False, last_layer=False, type=None):
    if type is None:
        name2node = {node.name: node for node in self.nodes()}
    else:
        name2node = {node.name: node for node in self.nodes() if node.type in type}
    assert name in name2node.keys(), " Name must be unique"
    node = name2node[name]
    if next_layer:
        if type is None:
            return self.successors(node)
        else:
            poss_list, begin = [], False
            for poss in nx.topological_sort(self):
                if poss == node:
                    begin = True
                    continue
                if begin and poss in name2node.values():
                    poss_list.append(poss)
            return [poss_list[0]]
    elif last_layer:
        return self.predecessors(node)
    else:
        return [node]
def update(self):
    import re
    self.type2ind = {}
    for node in self.nodes():
        ind = int(re.findall(r'^\w+?(\d+)$', node.name)[0])
        self.type2ind[node.type] = self.type2ind.get(node.type, []) + [ind]

    for node in nx.topological_sort(self):
        if node.type in ['Conv2D', 'Group', 'Conv2D_Pooling']:
            plus = 1
        else:
            plus = 0
        if len(self.predecessors(node)) == 0:
            node.depth = 0
        else:
            pre_depth = [_node.depth for _node in self.predecessors(node)]
            pre_depth = max(pre_depth)
            node.depth = self.max_depth = pre_depth + plus
def longestWeightedPath(self, G):
    """
    find longest path in a weighted DAG
    :param G: dag (networkx graph object)
    :return: longest_weighted_path (list)
    """
    dist = {}  # stores [node, distance] pair
    # start with topological sorted order
    for node in nx.topological_sort(G):
        # pairs of dist,node for all incoming edges
        pairs = [(dist[v][0] + G[v][node]['weight'], v) for v in G.pred[node]]  # incoming pairs
        if pairs:
            dist[node] = max(pairs)
        else:
            dist[node] = (0, node)
    node, (length, _) = max(dist.items(), key=lambda x: x[1])
    path = []
    while length > 0:
        path.append(node)
        length, node = dist[node]
    return list(reversed(path))
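A usage sketch of the same dynamic-programming idea, which computes the longest path of a weighted DAG in O(V+E) by relaxing nodes in topological order; the graph and weights below are illustrative, not from the original project:

import networkx as nx

# Small weighted DAG; node names and weights are made up for the demo.
g = nx.DiGraph()
g.add_weighted_edges_from([("a", "b", 2), ("b", "c", 3), ("a", "c", 1)])

dist = {}
for node in nx.topological_sort(g):
    # best (distance, predecessor) over all incoming edges
    pairs = [(dist[v][0] + g[v][node]["weight"], v) for v in g.pred[node]]
    dist[node] = max(pairs) if pairs else (0, node)
print(dist["c"])  # (5, 'b'): the longest path a -> b -> c has weight 5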
def _sort_deps_to_targets(self):
    assert(self.state == TransactionState.INIT)
    self.touches = list(reversed(list(nx.topological_sort(self.depend_G))))
    self.installs = [target for target in self.touches if not target.is_local]
def exclude_subtree_from(self, package):
    sub = dfs_tree(self.G, package)
    root = nx.topological_sort(self.G)[0]
    seen = {package}
    for n in sub.nodes():
        if n in seen:
            continue
        seen.add(n)
        if local_node_connectivity(self.G, root, n) > 1:
            sub.remove_nodes_from(InstalledGraph(sub).exclude_subtree_from(n))
    return InstalledGraph(sub)
def root_node(self):
    """
    Gives the root node of this graph.

    :return: datamodel.base.node.Node instance
    """
    return nx.topological_sort(self._nxgraph, reverse=True)[-1]
def execute(self, result_label="result"):
    """
    Starts from the leaf nodes, calculates their outputs and feeds
    them as inputs to their parent ones. The loop stops once the root
    node is reached. Optionally, you can assign a custom label to the
    output of the root node.

    :param result_label: str (optional)
    :return:
    """
    # Cannot execute graphs with isles
    if self.has_isles():
        raise exceptions.GraphExecutionError("Cannot execute graphs with "
                                             "isolated nodes")

    # Sort post-order (leaf nodes first, root node at the end)
    ordered_nodes = nx.topological_sort(self._nxgraph, reverse=True)

    # Assign a label to the output of the very last node to be executed:
    # the root node!
    self.root_node.set_output_label(result_label)

    # Output of node N is input for its parent
    try:
        for n in ordered_nodes:
            output = n.execute()
            predecessors = self._nxgraph.predecessors(n)
            if not predecessors:
                return output
            for parent in predecessors:
                parent.input(output)
    except exceptions.StopGraphExecutionSignal as e:
        console.info(e.message)
        return None
    except Exception as e:
        console.error(traceback.format_exc())
        raise exceptions.GraphExecutionError(e.message)
def build_involved_dag(self, data_keys):
    # get the nodes and edges that will be considered during the generation
    involved_dag = self._dag.build_directed_graph(data_keys, root_node_key='generate')
    involved_dag.reverse(copy=False)
    generation_order = nx.topological_sort(involved_dag)[:-1]
    involved_dag.node['generate']['skipped'] = False
    self._dag_prune_can_skip(involved_dag, generation_order)
    return involved_dag, generation_order
def setup_pyramid(self, N):
    # pyramid of normals with known covariances and latent means
    assert(N > 1)
    self.N = N  # number of layers in the pyramid
    lambdas = [1.1 * (k + 1) / N for k in range(N + 2)]
    self.lambdas = list(map(lambda x: Variable(torch.Tensor([x])), lambdas))
    # generate data
    self.data = []
    self.N_data = 3
    bottom_layer_size = 2 ** (N - 1)
    for i in range(bottom_layer_size):
        data_i = []
        for k in range(self.N_data):
            data_i.append(Variable(torch.Tensor([0.25]) +
                                   (0.1 + 0.4 * (i + 1) / bottom_layer_size) * torch.randn(1)))
        self.data.append(data_i)
    self.data_sums = [sum(self.data[i]) for i in range(bottom_layer_size)]
    self.N_data = Variable(torch.Tensor([self.N_data]))
    self.q_dag = self.construct_q_dag()
    # compute the order in which guide samples are generated
    self.q_topo_sort = list(networkx.topological_sort(self.q_dag))
    self.which_nodes_reparam = self.setup_reparam_mask(len(self.q_topo_sort))
    self.calculate_variational_targets()
    self.set_model_permutations()
    # for choosing which latents should be reparameterized
def forward(self, *inputs):
    self.assert_graph_is_valid()
    input_nodes = self.input_nodes
    output_nodes = self.output_nodes
    assert len(inputs) == len(input_nodes), \
        "Was expecting {} arguments for as many input nodes, got {}." \
        .format(len(input_nodes), len(inputs))
    # Unpack inputs to input nodes
    for input, input_node in zip(inputs, input_nodes):
        self.forward_through_node(input_node, input=input)
    # Toposort the graph
    toposorted = topological_sort(self.graph)
    # Remove all input and output nodes
    toposorted = [name for name in toposorted
                  if name not in input_nodes and name not in output_nodes]
    # Forward
    for node in toposorted:
        self.forward_through_node(node)
    # Read outputs from output nodes
    outputs = []
    for output_node in output_nodes:
        # Get all incoming edges to output node
        outputs_from_node = [self.graph[incoming][this]['payload']
                             for incoming, this in self.graph.in_edges(output_node)]
        outputs.append(pyu.from_iterable(outputs_from_node))
    # Clear payloads for next pass
    self.clear_payloads()
    # Done.
    return pyu.from_iterable(outputs)
def order_build(graph, packages=None, level=0, filter_dirty=True):
    '''
    Assumes that packages are in graph.
    Builds a temporary graph of relevant nodes and returns its topological sort.
    Relevant nodes are selected in a breadth-first traversal sourced at each pkg in packages.

    Values expected for packages is one of None, sequence:
        None: build the whole graph
        empty sequence: build nodes marked dirty
        non-empty sequence: build nodes in sequence
    '''
    if not packages:
        packages = graph.nodes()
        if filter_dirty:
            packages = dirty(graph)
    tmp_global = graph.subgraph(packages)

    # copy relevant node data to tmp_global
    for n in tmp_global.nodes_iter():
        tmp_global.node[n] = graph.node[n]

    try:
        order = nx.topological_sort(tmp_global, reverse=True)
    except nx.exception.NetworkXUnfeasible:
        raise ValueError("Cycles detected in graph: {0}".format(
            nx.find_cycle(tmp_global, orientation='ignore')))
    return tmp_global, order
def get_named_nodes(self, name):
    """Return a list of "op" nodes with the given name."""
    nlist = []
    if name not in self.basis:
        raise DAGCircuitError("%s is not in the list of basis operations" % name)

    # Iterate through the nodes of self in topological order
    ts = nx.topological_sort(self.multi_graph)
    for n in ts:
        nd = self.multi_graph.node[n]
        if nd["type"] == "op" and nd["name"] == name:
            nlist.append(n)
    return nlist
def serial_layers(self):
    """Return a list of layers for all gates of this circuit.

    A serial layer is a circuit with one gate. The layers have the
    same structure as in layers().
    """
    layers_list = []
    ts = nx.topological_sort(self.multi_graph)
    for n in ts:
        nxt_nd = self.multi_graph.node[n]
        if nxt_nd["type"] == "op":
            new_layer = DAGCircuit()
            for k, v in self.qregs.items():
                new_layer.add_qreg(k, v)
            for k, v in self.cregs.items():
                new_layer.add_creg(k, v)
            new_layer.basis = copy.deepcopy(self.basis)
            new_layer.gates = copy.deepcopy(self.gates)
            # Save the support of the operation we add to the layer
            support_list = []
            # Operation data
            qa = copy.copy(nxt_nd["qargs"])
            ca = copy.copy(nxt_nd["cargs"])
            pa = copy.copy(nxt_nd["params"])
            co = copy.copy(nxt_nd["condition"])
            cob = self._bits_in_condition(co)
            # Add node to new_layer
            new_layer.apply_operation_back(nxt_nd["name"], qa, ca, pa, co)
            # Add operation to partition
            if nxt_nd["name"] != "barrier":
                # support_list.append(list(set(qa) | set(ca) | set(cob)))
                support_list.append(list(set(qa)))
            l_dict = {"graph": new_layer, "partition": support_list}
            layers_list.append(l_dict)
    return layers_list
def collect_runs(self, namelist):
    """Return a set of runs of "op" nodes with the given names.

    For example, "... h q[0]; cx q[0],q[1]; cx q[0],q[1]; h q[1]; .."
    would produce the tuple of cx nodes as an element of the set returned
    from a call to collect_runs(["cx"]). If instead the cx nodes were
    "cx q[0],q[1]; cx q[1],q[0];", the method would still return the
    pair in a tuple. The namelist can contain names that are not
    in the circuit's basis.

    Nodes must have only one successor to continue the run.
    """
    group_list = []

    # Iterate through the nodes of self in topological order
    # and form tuples containing sequences of gates
    # on the same qubit(s).
    ts = nx.topological_sort(self.multi_graph)
    nodes_seen = dict(zip(ts, [False] * len(ts)))
    for node in ts:
        nd = self.multi_graph.node[node]
        if nd["type"] == "op" and nd["name"] in namelist \
                and not nodes_seen[node]:
            group = [node]
            nodes_seen[node] = True
            s = self.multi_graph.successors(node)
            while len(s) == 1 and \
                    self.multi_graph.node[s[0]]["type"] == "op" and \
                    self.multi_graph.node[s[0]]["name"] in namelist:
                group.append(s[0])
                nodes_seen[s[0]] = True
                s = self.multi_graph.successors(s[0])
            if len(group) > 1:
                group_list.append(tuple(group))
    return set(group_list)
def pre_transpile(self, workflow, graph, params=None):
    params = params or {}
    for visitor in self.VISITORS:
        visitor().visit(workflow, nx.topological_sort(graph), self.operations, params)
def topological_order(self, reverse=False):
    """
    Topological sort of the graph.

    :param reverse: whether to reverse the sort
    :return: generator yielding the tasks in topological order
    """
    task_ids = topological_sort(self._graph)
    if reverse:
        task_ids = reversed(tuple(task_ids))
    for task_id in task_ids:
        yield self.get_task(task_id)
def calc_choice_weight(self, evolution_choice_list, model):
    model_depth = 0
    for node in nx.topological_sort(model.graph):
        if node.type in ['Conv2D', 'Group', 'Conv2D_Pooling']:
            model_depth = model_depth + 1

    model_max_depth = model.config.model_max_depth
    max_pooling_limit = model.config.max_pooling_limit
    max_pooling_cnt = model.config.max_pooling_cnt

    weight = {}
    if 'deeper_with_pooling' in evolution_choice_list:
        weight['deeper_with_pooling'] = int(
            model_max_depth - model_max_depth / (2 * max_pooling_limit) * max_pooling_cnt)
    if 'deeper' in evolution_choice_list:
        weight['deeper'] = model_max_depth / 2
    if 'wider' in evolution_choice_list:
        weight['wider'] = model_max_depth / 2
    if 'add_skip' in evolution_choice_list:
        weight['add_skip'] = model_depth / 2 * 2
    if 'add_group' in evolution_choice_list:
        weight['add_group'] = model_depth / 2 * 2

    # choice_len = len(evolution_choice_list)
    # return [1] * choice_len  # equal weight now
    return weight
def add_skip(self, model, config):
    assert nx.is_directed_acyclic_graph(model.graph)
    topo_nodes = nx.topological_sort(model.graph)
    names = [node.name for node in topo_nodes
             if node.type == 'Conv2D' or node.type == 'Group' or node.type == 'Conv2D_Pooling']
    if len(names) <= 2:
        logger.info('can\'t find a suitable layer to apply add_skip operation,return origin model')
        return model, False

    max_iter = 100
    for i in range(max_iter + 1):
        if i == max_iter:
            logger.info('can\'t find a suitable layer to apply add_skip operation,return origin model')
            return model, False
        from_idx = np.random.randint(0, len(names) - 2)
        to_idx = from_idx + 1
        next_nodes = model.graph.get_nodes(names[to_idx], next_layer=True, last_layer=False)
        if 'Add' in [node.type for node in next_nodes]:
            continue
        else:
            break

    from_name = names[from_idx]
    to_name = names[to_idx]
    logger.info('choose {} and {} to add_skip'.format(from_name, to_name))
    return self.skip(model, from_name, to_name, config), True

# add group operation
def main():
    func_list = parse.parse(open(sys.argv[1]).read())
    G = foo(func_list)
    #G = callgraph(func_list)
    nx.write_dot(G, "G.dot")
    #H = nx.dfs_tree(G,'solver')
    #nx.write_dot(H,"H.dot")
    #print nx.topological_sort(H)
def get_schedule(self, simulation):
    schedule = {host: [] for host in simulation.hosts}
    hosts_count = len(simulation.hosts)
    graph = simulation.get_task_graph()
    for idx, task in enumerate(networkx.topological_sort(graph)):
        schedule[simulation.hosts[idx % hosts_count]].append(task)
    return schedule
def get_schedule(self, simulation):
    schedule = {host: [] for host in simulation.hosts}
    graph = simulation.get_task_graph()
    for task in networkx.topological_sort(graph):
        schedule[random.choice(simulation.hosts)].append(task)
    return schedule
def oct_dict(cls, nxgraph, platform_model):
    """
    Build optimistic cost table as a dict task->array.

    Args:
      nxgraph: networkx representation of task graph
      platform_model: platform linear model
    """
    result = dict()
    for task in networkx.topological_sort(nxgraph, reverse=True):
        oct_row = numpy.zeros(platform_model.host_count)
        if not nxgraph[task]:
            result[task] = oct_row
            continue
        for host, idx in platform_model.host_map.items():
            child_results = []
            for child, edge_dict in nxgraph[task].items():
                row = result[child].copy()
                row += child.amount / platform_model.speed
                comm_cost = numpy.ones(platform_model.host_count) * edge_dict["weight"] / platform_model.mean_bandwidth
                comm_cost += platform_model.mean_latency
                comm_cost[idx] = 0
                row += comm_cost
                child_results.append(row.min())
            oct_row[idx] = max(child_results)
        result[task] = oct_row
    return result
def findLongestPath(self, chr):
    """
    find the longest path for a chr
    :param chr: chromosome name (str)
    :return: order (list), longest_weighted_path (list)
    """
    # topological sorting and find the longest path
    dag = self.DAGs[chr]
    order = nx.topological_sort(dag)
    longest_weighted_path = self.longestWeightedPath(dag)
    # sys.stderr.write("[execute]\tPerforming topological sorting for " + str(chr) + "\n")
    # sys.stderr.write("[execute]\tFinding longest paths in " + str(chr) + "\n")
    print("[results]\tLongest weighted path in", chr, "\n", longest_weighted_path)
    return order, longest_weighted_path
def ordered_steps(cls, cfg):
    """ Return ordered steps from config """
    return nx.topological_sort(cls.create_dag(cfg))
def get_task_to_run(self, tree):
    """Working from the bottom up, replace each node with a chain to its
    descendant, or celery.Group of descendants.

    :param tree: Dependency graph of tasks
    :type tree: networkx.DiGraph
    :returns: chain to execute
    """
    # TODO: This could be more parallel
    return chain(*[
        maybe_signature(tree.node[name]['task'], self.celery_app)
        for name in nx.topological_sort(tree)
    ])
def create_directed_graphs(self):
    '''
    :return:
    '''
    self.directed_graphs = np.empty(
        (self.no_of_atoms, self.no_of_atoms - 1, 3), dtype=int)
    # parse all the atoms one by one and get directed graph to that atom
    # as the sink node
    for idx in range(self.no_of_atoms):
        # get shortest path from the root to all the other atoms and then reverse the edges.
        path = nx.single_source_dijkstra_path(self.graph, idx)
        G = nx.DiGraph()
        for i in range(self.no_of_atoms):
            temp = path[i]
            temp.reverse()
            G.add_path(temp)

        # do a topological sort to get an order of atoms with all edges pointing to the root
        topological_order = nx.topological_sort(G)
        sorted_path = np.empty((self.no_of_atoms - 1, 3))

        no_of_incoming_edges = {}
        for i in range(self.no_of_atoms - 1):
            node = topological_order[i]
            edge = (nx.edges(G, node))[0]
            if edge[1] in no_of_incoming_edges:
                index = no_of_incoming_edges[edge[1]]
                no_of_incoming_edges[edge[1]] += 1
            else:
                index = 0
                no_of_incoming_edges[edge[1]] = 1
            sorted_path[i, :] = [node, edge[1], index]
        self.directed_graphs[idx, :, :] = sorted_path
def dag(recipe_folder, config, packages="*", format='gml', hide_singletons=False):
    """
    Export the DAG of packages to a graph format file for visualization
    """
    dag, name2recipes = utils.get_dag(utils.get_recipes(recipe_folder, packages), config)
    if hide_singletons:
        for node in nx.nodes(dag):
            if dag.degree(node) == 0:
                dag.remove_node(node)
    if format == 'gml':
        nx.write_gml(dag, sys.stdout.buffer)
    elif format == 'dot':
        write_dot(dag, sys.stdout)
    elif format == 'txt':
        subdags = sorted(map(sorted, nx.connected_components(dag.to_undirected())))
        subdags = sorted(subdags, key=len, reverse=True)
        singletons = []
        for i, s in enumerate(subdags):
            if len(s) == 1:
                singletons += s
                continue
            print("# subdag {0}".format(i))
            subdag = dag.subgraph(s)
            recipes = [
                recipe for package in nx.topological_sort(subdag)
                for recipe in name2recipes[package]]
            print('\n'.join(recipes) + '\n')
        if not hide_singletons:
            print('# singletons')
            recipes = [recipe for package in singletons
                       for recipe in name2recipes[package]]
            print('\n'.join(recipes) + '\n')
def _generateDAG(self):
    '''
    Generate workflow DAG using networkx directed graph implementation.
    Return topological ordering of graphs. Note that nx.topological_sort(G)
    requires the graph to be acyclic. Cyclic behavior is hard to implement
    in practice since jobs are defined by calling specific dictionary elements.
    '''
    # Instantiate directed graph, add job dependency edges.
    G = nx.DiGraph()
    for job in self.jobs:
        G.add_node(job)
        if 'dependsOn' in self.jobs[job]:
            for dependency in self.jobs[job]['dependsOn']:
                G.add_edge(dependency['jobKey'], self.jobs[job]['jobKey'])
    self.dag_graph = G  # For printing purposes.

    # Sort jobs in graph using topological sort, assigning job buckets.
    # Jobs within the same bucket will be kicked off simultaneously.
    topology = nx.topological_sort(G)
    self.graph = [(0, topology[0])]
    for edge in topology[1:]:
        try:
            self.graph.append((len(nx.shortest_path(G, topology[0], edge)) - 1, edge))
        except nx.exception.NetworkXNoPath as error:
            self.graph.append((0, edge))
def calculate_account_data(prov_document: provmodel.ProvDocument) -> list:
    """
    Transforms a ProvDocument into a list of tuples including:
    ProvAgent, list of ProvRelations from agent,
    list of ProvElements associated to ProvAgent,
    list of Namespaces

    :param prov_document: Document to transform
    :type prov_document:
    :return: List of tuples(ProvAgent, list(), list(), list())
    :rtype: list
    """
    namespaces = prov_document.get_registered_namespaces()
    g = provgraph.prov_to_graph(prov_document=prov_document)
    sorted_nodes = topological_sort(g, reverse=True)
    agents = list(filter(lambda elem: isinstance(elem, provmodel.ProvAgent), sorted_nodes))
    elements = list(filter(lambda elem: not isinstance(elem, provmodel.ProvAgent), sorted_nodes))

    # Check on compatibility
    if not is_directed_acyclic_graph(g):
        raise Exception("Provenance graph is not acyclic")
    if isolates(g):
        raise Exception("Provenance not compatible with role-based concept. Has isolated Elements")
    for element in elements:
        if provmodel.ProvAgent not in [type(n) for n in g.neighbors(element)]:
            raise Exception(
                "Provenance not compatible with role-based concept. Element {} has no relation to any agent".format(
                    element))

    accounts = []
    for agent in agents:
        # find out-going relations from agent
        agent_relations = []
        for u, v in g.out_edges(agent):
            # Todo check if filter does not leave out some info
            agent_relations.append(g.get_edge_data(u, v)[0]['relation'])

        agent_elements = {}
        i = 0
        for element in elements:
            element_relations = []
            if g.has_edge(element, agent):
                for u, v in set(g.out_edges(element)):
                    for relation in g[u][v].values():
                        element_relations.append(relation['relation'])
                agent_elements[i] = {element: element_relations}
                i += 1

        accounts.append((agent, agent_relations, agent_elements, namespaces))
    return accounts
def compose_back(self, input_circuit, wire_map={}):
    """Apply the input circuit to the output of this circuit.

    The two bases must be "compatible" or an exception occurs.
    A subset of input qubits of the input circuit are mapped
    to a subset of output qubits of this circuit.
    wire_map[input_qubit_to_input_circuit] = output_qubit_of_self
    """
    union_basis = self._make_union_basis(input_circuit)
    union_gates = self._make_union_gates(input_circuit)

    # Check the wire map for duplicate values
    if len(set(wire_map.values())) != len(wire_map):
        raise DAGCircuitError("duplicates in wire_map")

    add_qregs = self._check_wiremap_registers(wire_map, input_circuit.qregs, self.qregs)
    for register in add_qregs:
        self.add_qreg(register[0], register[1])

    add_cregs = self._check_wiremap_registers(wire_map, input_circuit.cregs, self.cregs)
    for register in add_cregs:
        self.add_creg(register[0], register[1])

    self._check_wiremap_validity(wire_map, input_circuit.input_map,
                                 self.output_map, input_circuit)

    # Compose
    self.basis = union_basis
    self.gates = union_gates
    topological_sort = nx.topological_sort(input_circuit.multi_graph)
    for node in topological_sort:
        nd = input_circuit.multi_graph.node[node]
        if nd["type"] == "in":
            # if in wire_map, get new name, else use existing name
            m_name = wire_map.get(nd["name"], nd["name"])
            # the mapped wire should already exist
            assert m_name in self.output_map, \
                "wire (%s,%d) not in self" % (m_name[0], m_name[1])
            assert nd["name"] in input_circuit.wire_type, \
                "inconsistent wire_type for (%s,%d) in input_circuit" \
                % (nd["name"][0], nd["name"][1])
        elif nd["type"] == "out":
            # ignore output nodes
            pass
        elif nd["type"] == "op":
            condition = self._map_condition(wire_map, nd["condition"])
            self._check_condition(nd["name"], condition)
            m_qargs = list(map(lambda x: wire_map.get(x, x), nd["qargs"]))
            m_cargs = list(map(lambda x: wire_map.get(x, x), nd["cargs"]))
            self.apply_operation_back(nd["name"], m_qargs, m_cargs,
                                      nd["params"], condition)
        else:
            assert False, "bad node type %s" % nd["type"]
def compose_front(self, input_circuit, wire_map={}):
    """Apply the input circuit to the input of this circuit.

    The two bases must be "compatible" or an exception occurs.
    A subset of output qubits of the input circuit are mapped
    to a subset of input qubits of this circuit.
    """
    union_basis = self._make_union_basis(input_circuit)
    union_gates = self._make_union_gates(input_circuit)

    # Check the wire map
    if len(set(wire_map.values())) != len(wire_map):
        raise DAGCircuitError("duplicates in wire_map")

    add_qregs = self._check_wiremap_registers(wire_map, input_circuit.qregs, self.qregs)
    for r in add_qregs:
        self.add_qreg(r[0], r[1])

    add_cregs = self._check_wiremap_registers(wire_map, input_circuit.cregs, self.cregs)
    for r in add_cregs:
        self.add_creg(r[0], r[1])

    self._check_wiremap_validity(wire_map, input_circuit.output_map,
                                 self.input_map, input_circuit)

    # Compose
    self.basis = union_basis
    self.gates = union_gates
    ts = nx.topological_sort(input_circuit.multi_graph, reverse=True)
    for n in ts:
        nd = input_circuit.multi_graph.node[n]
        if nd["type"] == "out":
            # if in wire_map, get new name, else use existing name
            m_name = wire_map.get(nd["name"], nd["name"])
            # the mapped wire should already exist
            assert m_name in self.input_map, \
                "wire (%s,%d) not in self" % (m_name[0], m_name[1])
            assert nd["name"] in input_circuit.wire_type, \
                "inconsistent wire_type for (%s,%d) in input_circuit" \
                % (nd["name"][0], nd["name"][1])
        elif nd["type"] == "in":
            # ignore input nodes
            pass
        elif nd["type"] == "op":
            condition = self._map_condition(wire_map, nd["condition"])
            self._check_condition(nd["name"], condition)
            m_qargs = list(map(lambda x: wire_map.get(x, x), nd["qargs"]))
            m_cargs = list(map(lambda x: wire_map.get(x, x), nd["cargs"]))
            self.apply_operation_front(nd["name"], m_qargs, m_cargs,
                                       nd["params"], condition)
        else:
            assert False, "bad node type %s" % nd["type"]
def compile(cls, source_net, compiled_net):
    """Add observed nodes to the computation graph.

    Parameters
    ----------
    source_net : nx.DiGraph
    compiled_net : nx.DiGraph

    Returns
    -------
    compiled_net : nx.DiGraph
    """
    logger.debug("{} compiling...".format(cls.__name__))

    observable = []
    uses_observed = []

    for node in nx.topological_sort(source_net):
        state = source_net.node[node]
        if state.get('_observable'):
            observable.append(node)
            cls.make_observed_copy(node, compiled_net)
        elif state.get('_uses_observed'):
            uses_observed.append(node)
            obs_node = cls.make_observed_copy(node, compiled_net, args_to_tuple)
            # Make edge to the using node
            compiled_net.add_edge(obs_node, node, param='observed')
        else:
            continue

        # Copy the edges
        if not state.get('_stochastic'):
            obs_node = observed_name(node)
            for parent in source_net.predecessors(node):
                if parent in observable:
                    link_parent = observed_name(parent)
                else:
                    link_parent = parent
                compiled_net.add_edge(link_parent, obs_node,
                                      source_net[parent][node].copy())

    # Check that there are no stochastic nodes in the ancestors
    for node in uses_observed:
        # Use the observed version to query observed ancestors in the compiled_net
        obs_node = observed_name(node)
        for ancestor_node in nx.ancestors(compiled_net, obs_node):
            if '_stochastic' in source_net.node.get(ancestor_node, {}):
                raise ValueError("Observed nodes must be deterministic. Observed "
                                 "data depends on a non-deterministic node {}."
                                 .format(ancestor_node))

    return compiled_net
def build(self, debug=False):
    """Returns an OpenMDAO Group from the variable graph.

    Parameters
    ----------
    debug : bool
        True to print debug messages.

    Returns
    -------
    grp : omdao.Group
        the OpenMDAO group that computes all variables.
    input_bounds : dict[str, any]
        a dictionary from input variable name to (min, max, ndim) tuple.
    """
    input_bounds = {}
    ndim_dict = {}

    if not nx.is_directed_acyclic_graph(self._g):
        raise Exception('Dependency loop detected')

    grp = omdao.Group()
    prom = ['*']
    for var in nx.topological_sort(self._g):
        nattrs = self._g.node[var]
        ndim = nattrs['ndim']
        ndim_dict[var] = ndim
        if self._g.in_degree(var) == 0:
            # input variable
            if debug:
                print('Input variable: %s' % var)
            # range checking
            vmin, vmax = nattrs['min'], nattrs['max']
            veq = nattrs.get('equals', None)
            if vmin > vmax:
                raise Exception('Variable %s input range not valid.' % var)
            input_bounds[var] = veq, vmin, vmax, ndim
        else:
            init_vals = {par: np.zeros(ndim_dict[par])
                         for par in self._g.predecessors_iter(var)}
            comp_name = 'comp__%s' % var
            if 'expr' in nattrs:
                eqn = '{}={}'.format(var, nattrs['expr'])
                init_vals[var] = np.zeros(ndim)
                # noinspection PyTypeChecker
                grp.add(comp_name, omdao.ExecComp(eqn, **init_vals), promotes=prom)
            elif 'fun_list' in nattrs:
                params = nattrs['params']
                fun_list = nattrs['fun_list']
                vec_params = nattrs['vec_params']
                comp = VecFunComponent(var, fun_list, params, vector_params=vec_params)
                # noinspection PyTypeChecker
                grp.add(comp_name, comp, promotes=prom)
            else:
                raise Exception('Unknown attributes: {}'.format(nattrs))
    return grp, input_bounds
def wider(self, model, config):
    topo_nodes = nx.topological_sort(model.graph)
    # support group layer to wider
    # node.type == 'Conv2D' or node.type == 'Conv2D_Pooling'
    names = [node.name for node in topo_nodes
             if node.type == 'Conv2D' or node.type == 'Conv2D_Pooling' or node.type == 'Group']
    max_iter = 100
    for i in range(max_iter + 1):
        if i == max_iter:
            logger.info('can\'t find a suitable layer to apply wider operation,return origin model')
            return model, False
        # random choose a layer to wider, except last conv layer
        choice = names[np.random.randint(0, len(names) - 1)]
        cur_node = model.graph.get_nodes(choice)[0]
        next_nodes = model.graph.get_nodes(choice, next_layer=True, last_layer=False)
        if 'Conv2D' in [node.type for node in next_nodes] \
                or 'Conv2D_Pooling' in [node.type for node in next_nodes]:
            break
        else:
            continue

    cur_width = cur_node.config['filters']
    # for test: enlarge the max_cur_width
    # max_cur_width = (int((config.model_max_conv_width - config.model_min_conv_width)
    #                      * cur_node.depth / config.model_max_depth)
    #                  + config.model_min_conv_width) * 5
    max_cur_width = 1024
    width_ratio = np.random.rand()
    new_width = int(cur_width + width_ratio * (max_cur_width - cur_width))
    if cur_node.type == 'Group':
        # make sure that new_width % group_num == 0
        new_width = new_width // cur_node.config['group_num'] * cur_node.config['group_num']
    if new_width <= cur_width:
        logger.info('{} layer\'s width up to limit!'.format(choice))
        return model, False

    logger.info('choose {} to wider'.format(choice))
    if cur_node.type == 'Group':
        return self.wider_group_conv2d(model, layer_name=choice, new_width=new_width, config=config), True
    else:
        return self.wider_conv2d(model, layer_name=choice, new_width=new_width, config=config), True
def _import_daggen(line_iter):
    _NODE_TYPES = {"ROOT", "END", "COMPUTATION", "TRANSFER"}
    result = nx.DiGraph()
    node_mapper = lambda nid: "task_%d" % nid
    nodes = {}
    skip = True
    for line in line_iter:
        line = line.strip()
        if line.startswith("NODE_COUNT"):
            skip = False
            continue
        if skip or not line:
            continue
        node_parts = line.split(" ")
        assert len(node_parts) == 6
        magic, nodeid, children, nodetype, cost, parallel_ratio = node_parts
        assert magic == "NODE"
        nodeid = int(nodeid)
        children = list(map(int, children.split(","))) if children != "-" else []
        assert nodetype in _NODE_TYPES
        cost = float(cost)
        # unused for now
        parallel_ratio = float(parallel_ratio)
        nodes[nodeid] = (nodetype, children, cost)

    for nodeid, (nodetype, _, cost) in nodes.items():
        if nodetype != "TRANSFER":
            result.add_node(node_mapper(nodeid), weight=cost)

    for nodeid, (nodetype, children, _) in nodes.items():
        if nodetype == "TRANSFER":
            continue
        for childid in children:
            childtype, grandchildren, transfercost = nodes[childid]
            if childtype == "TRANSFER":
                assert len(grandchildren) == 1
                destination = grandchildren[0]
                weight = transfercost
            else:
                assert nodetype == "ROOT" or childtype == "END"
                destination = childid
                # TODO: Should be 0.
                #
                # Kludge to force order in 3rd-party HEFT implementation
                # (nodes connected by edges with zero weight get mixed
                # in HEFT priority list and violate precedence constraints)
                #
                # Can be removed as I can fix this BS in my HEFT
                weight = 1.
            result.add_edge(node_mapper(nodeid), node_mapper(destination), weight=weight)

    node_order = nx.topological_sort(result)
    return nx.relabel_nodes(result, {
        node_order[0]: "root",
        node_order[-1]: "end"
    })
def get_tasks_aest_alst(cls, nxgraph, platform_model):
    """
    Return AEST and ALST of tasks.

    Args:
      nxgraph: full task graph as networkx.DiGraph
      platform_model: cscheduling.PlatformModel object

    Returns:
      tuple containing 2 dictionaries
        aest: task->aest_value
        alst: task->alst_value
    """
    mean_speed = platform_model.mean_speed
    mean_bandwidth = platform_model.mean_bandwidth
    mean_latency = platform_model.mean_latency
    topological_order = networkx.topological_sort(nxgraph)

    # Average execution cost
    aec = {task: float(task.amount) / mean_speed for task in nxgraph}

    # Average earliest start time
    aest = {}
    # TODO: Check several roots and ends!
    root = topological_order[0]
    end = topological_order[-1]
    aest[root] = 0.
    for task in topological_order:
        parents = nxgraph.pred[task]
        if not parents:
            aest[task] = 0
            continue
        aest[task] = max([
            aest[parent] + aec[parent] +
            (nxgraph[parent][task]["weight"] / mean_bandwidth + mean_latency)
            for parent in parents
        ])

    topological_order.reverse()

    # Average latest start time
    alst = {}
    alst[end] = aest[end]
    for task in topological_order:
        if not nxgraph[task]:
            alst[task] = aest[task]
            continue
        alst[task] = min([
            alst[child] - (edge["weight"] / mean_bandwidth + mean_latency)
            for child, edge in nxgraph[task].items()
        ]) - aec[task]

    return aest, alst
def main(directory):
    log_reader = git_log_processing.GitLogReader(directory)
    log = log_reader.get_log_information()

    gitexplorer_database = GitExplorerBase.get_gitexplorer_database()
    gitexplorer_database.commit_collection.drop()
    gitexplorer_database.commit_collection.insert_many(log)

    queries.AggregatorRegistry.load('gitexplorer.queries.authors_per_file',
                                    'gitexplorer.queries.commits_by_datetime',
                                    'gitexplorer.queries.commits_by_filestats',
                                    'gitexplorer.queries.commits_per_author',
                                    'gitexplorer.queries.queries_per_commit')

    aggregations = list(map(queries.AggregatorRegistry.get,
                            ['authors_per_file_path',
                             'commits_by_day_of_week',
                             'commits_by_hour_of_day',
                             'additions_deletions_lines_commits_by_file_path',
                             'commits_per_author',
                             'additions_deletions_lines_modifications_per_commit',
                             'average_additions_deletions_lines_modifications_per_commit',
                             'additions_deletions_lines_modifications_commits_by_date',
                             'average_additions_deletions_lines_modifications_commits_by_date',
                             ]))

    dependencies = nx.DiGraph()
    for aggregation in aggregations:
        provides = aggregation.provides()
        dependencies.add_edge(provides, aggregation.requires())

    sorted_dependencies = nx.topological_sort(dependencies, reverse=True)
    print(sorted_dependencies)

    for dependency in sorted_dependencies:
        for aggregation in aggregations:
            if aggregation.name == dependency:
                aggregation().run()

    nx.draw(dependencies, with_labels=True)
    plt.show()