We extracted the following 41 code examples from open source Python projects to illustrate how to use multiprocessing.active_children().
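Before the project examples, here is a minimal sketch (standard library only, names chosen for illustration) of what active_children() does: it returns the list of child Process objects of the caller that are still alive, joining any that have already finished as a side effect.

import multiprocessing
import time

def worker():
    time.sleep(1)

if __name__ == '__main__':
    # start a few children and inspect them
    procs = [multiprocessing.Process(target=worker) for _ in range(3)]
    for p in procs:
        p.start()
    # lists children that are still alive; finished ones are reaped
    print(len(multiprocessing.active_children()))  # typically 3 right after start()
    for p in procs:
        p.join()
    print(len(multiprocessing.active_children()))  # 0 once all children are joined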
def access_register(proxy=None, timeout_check=True):
    i = 0
    while i < _max_register_count:
        try:
            if timeout_check:
                TimeoutTracker(try_register)(proxy=proxy)
            else:
                print(try_register(proxy=proxy))
            i += 1
            print("SUCCESS %d" % i)
        except Exception as e:
            print(str(e) + 'Error , retrying')
        finally:
            for p in multiprocessing.active_children():
                p.terminate()
            gc.collect()
def test_terminate(self):
    if self.TYPE == 'threads':
        self.skipTest('test not appropriate for {}'.format(self.TYPE))

    p = self.Process(target=self._test_terminate)
    p.daemon = True
    p.start()

    self.assertEqual(p.is_alive(), True)
    self.assertIn(p, self.active_children())
    self.assertEqual(p.exitcode, None)

    p.terminate()

    join = TimingWrapper(p.join)
    self.assertEqual(join(), None)
    self.assertTimingAlmostEqual(join.elapsed, 0.0)

    self.assertEqual(p.is_alive(), False)
    self.assertNotIn(p, self.active_children())

    p.join()

    # XXX sometimes get p.exitcode == 0 on Windows ...
    #self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_multiprocessing():
    """Tests that the number of children we produce is correct"""
    # Selects a number at random so we can spot check
    num_workers = random.choice(range(2, multiprocessing.cpu_count() * 2 + 1))
    app = Sanic('test_multiprocessing')

    process_list = set()

    def stop_on_alarm(*args):
        for process in multiprocessing.active_children():
            process_list.add(process.pid)
            process.terminate()

    signal.signal(signal.SIGALRM, stop_on_alarm)
    signal.alarm(1)
    app.run(HOST, PORT, workers=num_workers)

    assert len(process_list) == num_workers
def tearDownClass(cls):
    # only the manager process should be returned by active_children()
    # but this can take a bit on slow machines, so wait a few seconds
    # if there are other children too (see #17395)
    t = 0.01
    while len(multiprocessing.active_children()) > 1 and t < 5:
        time.sleep(t)
        t *= 2
    gc.collect()  # do garbage collection
    if cls.manager._number_of_objects() != 0:
        # This is not really an error since some tests do not
        # ensure that all processes which hold a reference to a
        # managed object have been joined.
        print('Shared objects which still exist at manager shutdown:')
        print(cls.manager._debug_info())
    cls.manager.shutdown()
    cls.manager.join()
    cls.manager = None
def run_pool(workers, n_jobs=-1, sleep=0.1):
    # defensive copy
    workers = workers[:]
    if n_jobs < 1:
        n_jobs = multiprocessing.cpu_count()
    processes = []
    p = None
    try:
        while True:
            active = multiprocessing.active_children()
            while len(active) < n_jobs and len(workers) > 0:
                p = workers.pop(0)
                p.start()
                processes.append(p)
                active = multiprocessing.active_children()
            if len(workers) == 0 and len(active) == 0:
                break
            time.sleep(sleep)
    except (KeyboardInterrupt, SystemExit):
        if p is not None:
            p.terminate()
        for p in processes:
            p.terminate()
        raise
def scanning_boosters(self):
    proces = []
    for ip in self.target:
        k = len(multiprocessing.active_children())
        if k == self.thread:
            time.sleep(3)
            self.thread = self.thread + 30
        mythread = multiprocessing.Process(target=self.checkping, args=(ip,))
        mythread.start()
        proces.append(mythread)
    for mythread in proces:
        mythread.join()
    self.timeclose = time.time()
    self.showing_results()
    return

# Printing Function
def test_multiprocessing():
    """Tests that the number of children we produce is correct"""
    # Selects a number at random so we can spot check
    num_workers = random.choice(range(2, multiprocessing.cpu_count() * 2 + 1))
    app = Mach9('test_multiprocessing')

    process_list = set()

    def stop_on_alarm(*args):
        for process in multiprocessing.active_children():
            process_list.add(process.pid)
            process.terminate()

    signal.signal(signal.SIGALRM, stop_on_alarm)
    signal.alarm(1)
    app.run(HOST, PORT, workers=num_workers)

    assert len(process_list) == num_workers
def producer(self, producer_instance):
    with producer_instance as producer:
        yield producer
    assert len(multiprocessing.active_children()) == 0
def test_messages_not_duplicated(self, message, producer_instance):
    with capture_new_messages(
        message.topic
    ) as get_messages, producer_instance as producer:
        producer.publish(message)
        producer.flush()

    assert len(multiprocessing.active_children()) == 0
    assert len(get_messages()) == 1
def test_child_processes_do_not_survive_an_exception(self, producer_instance, message):
    with pytest.raises(RandomException), producer_instance as producer:
        producer.publish(message)
        producer.flush()
        producer.publish(message)
        raise RandomException()
    assert len(multiprocessing.active_children()) == 0
def test_skip_publish_pii_message(self, pii_schema, payload, producer_instance):
    with reconfigure(
        encryption_type='AES_MODE_CBC-1',
        skip_messages_with_pii=True
    ), producer_instance as producer, mock.patch.object(
        data_pipeline._kafka_producer,
        'logger'
    ) as mock_logger:
        pii_message = CreateMessage(
            schema_id=pii_schema.schema_id,
            payload=payload
        )
        messages = self._publish_message(pii_message, producer)

        assert len(messages) == 0
        assert len(multiprocessing.active_children()) == 0

        call_args = (
            "Skipping a PII message - uuid hex: {}, schema_id: {}, "
            "timestamp: {}, type: {}"
        ).format(
            pii_message.uuid_hex,
            pii_message.schema_id,
            pii_message.timestamp,
            pii_message.message_type.name
        )
        assert mock_logger.info.call_args_list[0] == mock.call(call_args)
def test_publish_pii_payload_data_message(
    self, pii_schema, example_payload_data, producer_instance
):
    with reconfigure(
        encryption_type='AES_MODE_CBC-1',
        skip_messages_with_pii=False
    ), producer_instance as producer:
        pii_message = CreateMessage(
            schema_id=pii_schema.schema_id,
            payload_data=example_payload_data
        )
        self._publish_and_assert_pii_message(pii_message, producer)
    assert len(multiprocessing.active_children()) == 0
def shutdown(self, c):
    '''
    Shutdown this process
    '''
    try:
        try:
            util.debug('manager received shutdown message')
            c.send(('#RETURN', None))

            if sys.stdout != sys.__stdout__:
                util.debug('resetting stdout, stderr')
                sys.stdout = sys.__stdout__
                sys.stderr = sys.__stderr__

            util._run_finalizers(0)

            for p in active_children():
                util.debug('terminating a child process of manager')
                p.terminate()

            for p in active_children():
                util.debug('terminating a child process of manager')
                p.join()

            util._run_finalizers()
            util.info('manager exiting with exitcode 0')
        except:
            import traceback
            traceback.print_exc()
    finally:
        exit(0)
def _wait_for_processors_to_free_up(self, max_concurrent_processes):
    while len(active_children()) >= max_concurrent_processes:
        self.logger.debug("Waiting a few seconds for processors to free up")
        time.sleep(0.1)
def kill_process(self):
    proc_list = []
    while multiprocessing.active_children():
        for p in multiprocessing.active_children():
            if p.is_alive():
                p.terminate()
                proc_list.append(p.pid)
    if self.is_alive():
        self.terminate()
        proc_list.append(self.pid)
    print("\nTerminated Autorepeat Processes:", *set(proc_list), sep=' ')
def kill_children():
    """ Kill child process """
    for proc in multiprocessing.active_children():
        LOG.info('Terminating %r [%d] ...', proc, proc.pid)
        proc.terminate()

    parent = psutil.Process(os.getpid())
    for child in parent.children(recursive=True):
        LOG.info('Terminating process %r', child)
        child.kill()
def test_active_children(self):
    self.assertEqual(type(self.active_children()), list)

    p = self.Process(target=time.sleep, args=(DELTA,))
    self.assertNotIn(p, self.active_children())

    p.daemon = True
    p.start()
    self.assertIn(p, self.active_children())

    p.join()
    self.assertNotIn(p, self.active_children())
def test_number_of_objects(self):
    EXPECTED_NUMBER = 1                # the pool object is still alive
    multiprocessing.active_children()  # discard dead process objs
    gc.collect()                       # do garbage collection
    refs = self.manager._number_of_objects()
    debug_info = self.manager._debug_info()
    if refs != EXPECTED_NUMBER:
        print self.manager._debug_info()
        print debug_info

    self.assertEqual(refs, EXPECTED_NUMBER)

#
# Test of creating a customized manager class
#
def test_process(self):
    q = self.Queue(1)
    e = self.Event()
    args = (q, 1, 2)
    kwargs = {'hello': 23, 'bye': 2.54}
    name = 'SomeProcess'
    p = self.Process(
        target=self._test, args=args, kwargs=kwargs, name=name
    )
    p.daemon = True
    current = self.current_process()

    if self.TYPE != 'threads':
        self.assertEqual(p.authkey, current.authkey)
    self.assertEqual(p.is_alive(), False)
    self.assertEqual(p.daemon, True)
    self.assertNotIn(p, self.active_children())
    self.assertTrue(type(self.active_children()) is list)
    self.assertEqual(p.exitcode, None)

    p.start()

    self.assertEqual(p.exitcode, None)
    self.assertEqual(p.is_alive(), True)
    self.assertIn(p, self.active_children())

    self.assertEqual(q.get(), args[1:])
    self.assertEqual(q.get(), kwargs)
    self.assertEqual(q.get(), p.name)
    if self.TYPE != 'threads':
        self.assertEqual(q.get(), current.authkey)
        self.assertEqual(q.get(), p.pid)

    p.join()

    self.assertEqual(p.exitcode, 0)
    self.assertEqual(p.is_alive(), False)
    self.assertNotIn(p, self.active_children())
def on_shutdown(self):
    for child in multiprocessing.active_children():
        self._logger.info("Killed Child")
        child.terminate()
    self._logger.info("Meta Reader Terminated")
def _map_len(self):
    return len(multiprocessing.active_children())
def onClosing(self):
    if messagebox.askokcancel("Quit", "do you want to Quit?"):
        for child in multiprocessing.active_children():
            kill_proc_tree(child.pid)
        if self.running:
            killFCEUX()
        self.master.destroy()
        self.master.quit()
def test_messages_published_without_flush(self, message, producer_instance):
    with capture_new_messages(
        message.topic
    ) as get_messages, producer_instance as producer:
        producer.publish(message)

    assert len(multiprocessing.active_children()) == 0
    assert len(get_messages()) == 1
def close(self):
    """Closes the producer, flushing all buffered messages into Kafka.

    Calling this method directly is not recommended, instead, use the
    producer as a context manager::

        with Producer() as producer:
            producer.publish(message)
            ...
            producer.publish(message)
    """
    self.registrar.stop()
    self.monitor.close()
    self._kafka_producer.close()
    assert len(multiprocessing.active_children()) == 0
def test_server_multiproc(set_timeout, restore_signal):
    started = mp.Value('i', 0)
    terminated = mp.Value('i', 0)
    proc_idxs = mp.Array('i', 3)

    @aiotools.actxmgr
    async def myserver(loop, proc_idx, args):
        started, terminated, proc_idxs = args
        await asyncio.sleep(0)
        with started.get_lock():
            started.value += 1
        proc_idxs[proc_idx] = proc_idx

        yield

        await asyncio.sleep(0)
        with terminated.get_lock():
            terminated.value += 1

    def interrupt():
        os.kill(0, signal.SIGINT)

    set_timeout(0.2, interrupt)
    aiotools.start_server(myserver, num_workers=3,
                          args=(started, terminated, proc_idxs))

    assert started.value == 3
    assert terminated.value == 3
    assert list(proc_idxs) == [0, 1, 2]
    assert len(mp.active_children()) == 0
def start(debug=False):
    # check master running
    if master_pid("c") is True:
        cmsg("Service master is running..., start action exit.", "error")
        sys.exit(0)
    try:
        worker_list, worker_config_list = enabled_worker()
        process_num = multiprocessing.cpu_count() * 2
        pid_list = []
        pool = multiprocessing.Pool(processes=process_num)
        # worker_max = [int(worker_config_list[w]["setup"]["process_num"]) for w in worker_list]
        for w in worker_list:
            if int(worker_config_list[w]["setup"]["process_num"]) < process_num:
                max_worker = int(worker_config_list[w]["setup"]["process_num"])
            else:
                max_worker = process_num
            for i in xrange(max_worker):
                pool.apply_async(exec_worker, args=(w,))
        for i in multiprocessing.active_children():
            pid_list.append(i.pid)
        pid_list.append(os.getpid())
        write_master_pid = master_pid("w", os.getpid())
        write_subproc_pid = processors_list("w", pid_list)
        if (write_master_pid is None) or (write_subproc_pid is False):
            print "Have error, write master/subproc pid fail!"
            processors_list("k", pid_list)
            master_pid("k", os.getpid())
        else:
            pool.close()
            pool.join()
    except Exception, ex:
        print ex
def test_terminate(self):
    if self.TYPE == 'threads':
        self.skipTest('test not appropriate for {}'.format(self.TYPE))

    p = self.Process(target=self._test_terminate)
    p.daemon = True
    p.start()

    self.assertEqual(p.is_alive(), True)
    self.assertIn(p, self.active_children())
    self.assertEqual(p.exitcode, None)

    join = TimingWrapper(p.join)
    self.assertEqual(join(0), None)
    self.assertTimingAlmostEqual(join.elapsed, 0.0)
    self.assertEqual(p.is_alive(), True)

    self.assertEqual(join(-1), None)
    self.assertTimingAlmostEqual(join.elapsed, 0.0)
    self.assertEqual(p.is_alive(), True)

    # XXX maybe terminating too soon causes the problems on Gentoo...
    time.sleep(1)

    p.terminate()

    if hasattr(signal, 'alarm'):
        # On the Gentoo buildbot waitpid() often seems to block forever.
        # We use alarm() to interrupt it if it blocks for too long.
        def handler(*args):
            raise RuntimeError('join took too long: %s' % p)
        old_handler = signal.signal(signal.SIGALRM, handler)
        try:
            signal.alarm(10)
            self.assertEqual(join(), None)
        finally:
            signal.alarm(0)
            signal.signal(signal.SIGALRM, old_handler)
    else:
        self.assertEqual(join(), None)

    self.assertTimingAlmostEqual(join.elapsed, 0.0)

    self.assertEqual(p.is_alive(), False)
    self.assertNotIn(p, self.active_children())

    p.join()

    # XXX sometimes get p.exitcode == 0 on Windows ...
    #self.assertEqual(p.exitcode, -signal.SIGTERM)
def trainer(model, fifos, shared_buffer, args):
    iteration = 0
    episode_rewards = []
    episode_lengths = []
    while len(multiprocessing.active_children()) > 0:
        batch_observations = []
        batch_actions = []
        batch_returns = []
        batch_advantages = []

        # loop over fifos from all runners
        for fifo in fifos:
            try:
                # wait for new trajectory
                observations, actions, returns, advantages, rewards, lengths = fifo.get(timeout=args.queue_timeout)

                # add to batch
                batch_observations.append(observations)
                batch_actions.append(actions)
                batch_returns.append(returns)
                batch_advantages.append(advantages)

                # log statistics
                episode_rewards += rewards
                episode_lengths += lengths
            except Empty:
                # just ignore empty fifos, batch will be smaller
                pass

        # if any of the runners produced trajectories
        if len(batch_observations) > 0:
            # form training data from observations, actions and returns
            x = np.array(batch_observations)
            p = np.array(batch_actions)
            R = np.array(batch_returns)
            A = np.array(batch_advantages)
            R = R[..., np.newaxis]

            # train the model
            total_loss, policy_loss, baseline_loss = model.train_on_batch([x, A], [p, R])

            # share model parameters
            shared_buffer.raw = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)

            iteration += 1

            if iteration % args.stats_interval == 0:
                print("Iter %d: episodes %d, mean episode reward %.2f, mean episode length %.2f." %
                      (iteration, len(episode_rewards), np.mean(episode_rewards), np.mean(episode_lengths)))
                episode_rewards = []
                episode_lengths = []