The following 50 code examples, extracted from open source Python projects, illustrate how to use signal.SIGTERM.
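Before the project examples, here is a minimal sketch of the two sides of the typical pattern: installing a handler for signal.SIGTERM so a process can shut down cleanly, and delivering SIGTERM to a child process with os.kill. The handler name handle_sigterm and the sleeping child command are illustrative only and do not come from any of the projects below; the example assumes a POSIX system.

import os
import signal
import subprocess
import sys
import time

def handle_sigterm(signum, frame):
    # Clean up here, then exit with the conventional 128 + signal number.
    print("SIGTERM received, shutting down")
    sys.exit(128 + signum)

# Receiving side: install the handler in the current (parent) process.
signal.signal(signal.SIGTERM, handle_sigterm)

# Sending side: start a child and ask it to terminate.
# The child has not installed a handler, so the default disposition applies.
child = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(60)"])
time.sleep(1)
os.kill(child.pid, signal.SIGTERM)   # on POSIX this is what child.terminate() does
child.wait()

Most of the examples below are variations on one of these two sides: a handler registered during start-up, or os.kill/Popen.terminate() used to stop a daemon, worker, or test subprocess.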
def terminate(self):
    try:
        os.kill(self.pid, signal.SIGTERM)
    except OSError, e:
        if e.errno == errno.ESRCH:
            return  # process already killed by someone else
        if e.errno == errno.EPERM:
            # this can only happen if the original session was started as
            # a different user. e.g. if the broker has been restarted with
            # --demote=<some other user>. but don't worry too much about it
            # as sessions are eventually terminated anyway.
            print(
                'WARNING: session with PID=%d not terminated because it '
                'is owned by a different user. did you restart a broker '
                'as a different user?' % self.pid
            )
            return
        raise e
def kas(argv):
    """
    The main entry point of kas.
    """
    create_logger()

    parser = kas_get_argparser()
    args = parser.parse_args(argv)

    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    logging.info('%s %s started', os.path.basename(sys.argv[0]), __version__)

    loop = asyncio.get_event_loop()
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(sig, interruption)
    atexit.register(_atexit_handler)

    for plugin in getattr(kasplugin, 'plugins', []):
        if plugin().run(args):
            return

    parser.print_help()
def run(self):
    """Option to calling manually calling start()/stop(). This will start
    the server and watch for signals to stop the server"""
    self.server.start()
    log.info(" ABCIServer started on port: {}".format(self.port))

    # wait for interrupt
    evt = Event()
    gevent.signal(signal.SIGQUIT, evt.set)
    gevent.signal(signal.SIGTERM, evt.set)
    gevent.signal(signal.SIGINT, evt.set)
    evt.wait()

    log.info("Shutting down server")
    self.server.stop()
    # TM will spawn off 3 connections: mempool, consensus, query
    # If an error happens in 1 it still leaves the others open which
    # means you don't have all the connections available to TM
def test_err_in_fun(self):
    # Test that the original signal this process was hit with
    # is not returned in case fun raise an exception. Instead,
    # we're supposed to see retsig = 1.
    ret = pyrun(textwrap.dedent(
        """
        import os, signal, imp, sys
        mod = imp.load_source("mod", r"{}")

        def foo():
            sys.stderr = os.devnull
            1 / 0

        sig = signal.SIGTERM if os.name == 'posix' else \
            signal.CTRL_C_EVENT
        mod.register_exit_fun(foo)
        os.kill(os.getpid(), sig)
        """.format(os.path.abspath(__file__), TESTFN)
    ))
    if POSIX:
        self.assertEqual(ret, 1)
        assert ret != signal.SIGTERM, strfsig(ret)
def subprocess_terminate(proc):
    try:
        proc.terminate()
    except AttributeError:
        print " no terminate method to Popen.."
        try:
            import signal
            os.kill(proc.pid, signal.SIGTERM)
        except AttributeError:
            print " no os.kill, using win32api.."
            try:
                import win32api
                PROCESS_TERMINATE = 1
                handle = win32api.OpenProcess(PROCESS_TERMINATE, False, proc.pid)
                win32api.TerminateProcess(handle, -1)
                win32api.CloseHandle(handle)
            except ImportError:
                print " ERROR: could not terminate process."
def stop(self):
    """
    Stop the shell
    """
    if self.is_running():
        try:
            os.kill(self._shell_pid, signal.SIGTERM)
        except OSError:
            pass
        start = time.time()
        while self.is_running() and (time.time() < (start + 0.2)):
            time.sleep(0.05)
        if self.is_running():
            utils.ConsoleLogger.log("Failed to stop shell process")
        else:
            utils.ConsoleLogger.log("Shell process stopped")
def stop(self):
    pid = None
    if not os.path.exists(self.pidfile):
        logger.debug('pidfile not exist:' + self.pidfile)
        return
    try:
        pid = _read_file(self.pidfile)
        pid = int(pid)
        os.kill(pid, signal.SIGTERM)
        return
    except Exception as e:
        logger.warn('{e} while get and kill pid={pid}'.format(
            e=repr(e), pid=pid))
def _kill_ssh(self):
    if self.ssh_pid > 1:
        try:
            os.kill(self.ssh_pid, signal.SIGTERM)
            os.waitpid(self.ssh_pid, 0)
        except OSError, e:
            if e.errno not in [errno.ECHILD, errno.ESRCH]:
                raise Exception('unhandled errno: %d' % e.errno)
        self.self_pid = -1
    try:
        os.close(self.ssh_fd)
    except OSError, e:
        if e.errno == errno.EBADF:
            pass  # already closed
        else:
            print 'WHAT?', e
            raise e
def idle(self, stop_signals=(SIGINT, SIGTERM, SIGABRT)):
    """ Blocks until one of the signals are received and stops the updater

    Args:
        stop_signals: Iterable containing signals from the signal module that
            should be subscribed to. Updater.stop() will be called on receiving
            one of those signals. Defaults to (SIGINT, SIGTERM, SIGABRT)
    """
    for sig in stop_signals:
        signal(sig, self.signal_handler)

    self.is_idle = True

    while self.is_idle:
        sleep(1)
def exit_test():
    global periodic_checker
    if periodic_checker:
        periodic_checker.stop()
    os.kill(rolld_proc.pid, signal.SIGTERM)
    os.kill(nginx_proc.pid, signal.SIGTERM)
    # IOLoop.instance().add_timeout(time.time() + 5, partial(sys.exit, 0))
    # check if we have zombies left
    try:
        lines = subprocess.check_output('ps auxw | grep python | grep app.py | grep -v grep', shell=True)
        print lines
        assert len(lines) == 0
    except subprocess.CalledProcessError as grepexc:
        # grep shouldnt find anything so exit code should be 1
        if grepexc.returncode == 1:
            pass
        else:
            raise
    # if everything is fine, just stop our ioloop now.
    IOLoop.current().stop()
def monitor_retransmit():
    global proc
    with proc.stdout:
        for line in iter(proc.stdout.readline, b''):
            if stop_flag:
                break
            tokens = line.split()
            if len(tokens) < 5 or (tokens[2] == '-:-' and tokens[4] == '-:-') \
                    or tokens[0] == "TIME":
                continue
            key = tokens[1] + ':' + tokens[3] + ':6'
            mon_lock.acquire()
            if key not in mon_flows:
                print "updating mon_flows", key
                mon_flows.update({key: 1})
            mon_lock.release()
    os.kill(proc.pid, signal.SIGTERM)
    proc.wait()  # wait for the subprocess to exit
    proc = None
def __init__(self, task_name, manager, config, timer, base_dir, backup_dir, **kwargs):
    self.task_name = task_name
    self.manager = manager
    self.config = config
    self.timer = timer
    self.base_dir = base_dir
    self.backup_dir = backup_dir
    self.args = kwargs
    self.verbose = self.config.verbose

    self.runnning = False
    self.stopped = False
    self.completed = False
    self.exit_code = 255

    self.thread_count = None
    self.cpu_count = cpu_count()
    self.compression_method = 'none'
    self.compression_supported = ['none']
    self.timer_name = self.__class__.__name__

    signal(SIGINT, SIG_IGN)
    signal(SIGTERM, self.close)
def test_sigkill(self):
    self.assert_everything_has_started()

    self.subp.kill()
    time.sleep(0.5)
    lines = sorted(self.get_lines())
    lines = self.hide_pids(lines)
    self.assertEqual([
        b'ERROR:cotyledon.tests.examples:heavy terminate',
        b'ERROR:cotyledon.tests.examples:heavy terminate',
        b'INFO:cotyledon:Caught SIGTERM signal, graceful exiting of '
        b'service heavy(0) [XXXX]',
        b'INFO:cotyledon:Caught SIGTERM signal, graceful exiting of '
        b'service heavy(1) [XXXX]',
        b'INFO:cotyledon:Caught SIGTERM signal, graceful exiting of '
        b'service light(0) [XXXX]',
        b'INFO:cotyledon:Parent process has died unexpectedly, '
        b'heavy(0) [XXXX] exiting',
        b'INFO:cotyledon:Parent process has died unexpectedly, '
        b'heavy(1) [XXXX] exiting',
        b'INFO:cotyledon:Parent process has died unexpectedly, '
        b'light(0) [XXXX] exiting',
    ], lines)

    self.assert_everything_is_dead(-9)
def test_graceful_timeout_term(self):
    lines = self.get_lines(1)
    childpid = self.get_pid(lines[0])
    self.subp.terminate()
    time.sleep(2)
    self.assertEqual(0, self.subp.poll())
    self.assertRaises(OSError, os.kill, self.subp.pid, 0)
    self.assertRaises(OSError, os.kill, childpid, 0)
    lines = self.hide_pids(self.get_lines())
    self.assertNotIn('ERROR:cotyledon.tests.examples:time.sleep done',
                     lines)
    self.assertEqual([
        b'INFO:cotyledon:Caught SIGTERM signal, graceful exiting of '
        b'service buggy(0) [XXXX]',
        b'INFO:cotyledon:Graceful shutdown timeout (1) exceeded, '
        b'exiting buggy(0) [XXXX] now.',
        b'DEBUG:cotyledon:Shutdown finish'
    ], lines[-3:])
def test_graceful_timeout_kill(self):
    lines = self.get_lines(1)
    childpid = self.get_pid(lines[0])
    self.subp.kill()
    time.sleep(2)
    self.assertEqual(-9, self.subp.poll())
    self.assertRaises(OSError, os.kill, self.subp.pid, 0)
    self.assertRaises(OSError, os.kill, childpid, 0)
    lines = self.hide_pids(self.get_lines())
    self.assertNotIn('ERROR:cotyledon.tests.examples:time.sleep done',
                     lines)
    self.assertEqual([
        b'INFO:cotyledon:Parent process has died unexpectedly, buggy(0) '
        b'[XXXX] exiting',
        b'INFO:cotyledon:Caught SIGTERM signal, graceful exiting of '
        b'service buggy(0) [XXXX]',
        b'INFO:cotyledon:Graceful shutdown timeout (1) exceeded, '
        b'exiting buggy(0) [XXXX] now.',
    ], lines[-3:])
def test_proc_exited(self):
    waiter = asyncio.Future(loop=self.loop)
    transport, protocol = self.create_transport(waiter)
    transport._process_exited(6)
    self.loop.run_until_complete(waiter)

    self.assertEqual(transport.get_returncode(), 6)

    self.assertTrue(protocol.connection_made.called)
    self.assertTrue(protocol.process_exited.called)
    self.assertTrue(protocol.connection_lost.called)
    self.assertEqual(protocol.connection_lost.call_args[0], (None,))

    self.assertFalse(transport._closed)
    self.assertIsNone(transport._loop)
    self.assertIsNone(transport._proc)
    self.assertIsNone(transport._protocol)

    # methods must raise ProcessLookupError if the process exited
    self.assertRaises(ProcessLookupError,
                      transport.send_signal, signal.SIGTERM)
    self.assertRaises(ProcessLookupError, transport.terminate)
    self.assertRaises(ProcessLookupError, transport.kill)

    transport.close()
def _child_process_handle_signal(self):
    # Setup child signal handlers differently

    def _sigterm(*args):
        self.signal_handler.clear()
        self.launcher.stop()

    def _sighup(*args):
        self.signal_handler.clear()
        raise SignalExit(signal.SIGHUP)

    self.signal_handler.clear()

    # Parent signals with SIGTERM when it wants us to go away.
    self.signal_handler.add_handler('SIGTERM', _sigterm)
    self.signal_handler.add_handler('SIGHUP', _sighup)
    self.signal_handler.add_handler('SIGINT', self._fast_exit)
def stop(self):
    """Terminate child processes and wait on each."""
    self.running = False

    LOG.debug("Stop services.")
    for service in set(
            [wrap.service for wrap in self.children.values()]):
        service.stop()

    LOG.debug("Killing children.")
    for pid in self.children:
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError as exc:
            if exc.errno != errno.ESRCH:
                raise

    # Wait for children to die
    if self.children:
        LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
        while self.children:
            self._wait_child()
def send_signal(self, sig):
    """Send a signal to process pre-emptively checking whether
    PID has been reused (see signal module constants).
    On Windows only SIGTERM is valid and is treated as an alias
    for kill().
    """
    if POSIX:
        self._send_signal(sig)
    else:  # pragma: no cover
        if sig == signal.SIGTERM:
            self._proc.kill()
        # py >= 2.7
        elif sig in (getattr(signal, "CTRL_C_EVENT", object()),
                     getattr(signal, "CTRL_BREAK_EVENT", object())):
            self._proc.send_signal(sig)
        else:
            raise ValueError(
                "only SIGTERM, CTRL_C_EVENT and CTRL_BREAK_EVENT signals "
                "are supported on Windows")
def kill(self, sig):
    '''Sends a Unix signal to the subprocess.

    Use constants from the :mod:`signal` module to specify which signal.
    '''
    if sys.platform == 'win32':
        if sig in [signal.SIGINT, signal.CTRL_C_EVENT]:
            sig = signal.CTRL_C_EVENT
        elif sig in [signal.SIGBREAK, signal.CTRL_BREAK_EVENT]:
            sig = signal.CTRL_BREAK_EVENT
        else:
            sig = signal.SIGTERM
    os.kill(self.proc.pid, sig)
def ShutdownLogcatMonitor(base_dir, logger):
    """Attempts to shutdown adb_logcat_monitor and blocks while waiting."""
    try:
        monitor_pid_path = os.path.join(base_dir, 'LOGCAT_MONITOR_PID')
        with open(monitor_pid_path) as f:
            monitor_pid = int(f.readline())

        logger.info('Sending SIGTERM to %d', monitor_pid)
        os.kill(monitor_pid, signal.SIGTERM)
        i = 0
        while True:
            time.sleep(.2)
            if not os.path.exists(monitor_pid_path):
                return
            if not os.path.exists('/proc/%d' % monitor_pid):
                logger.warning('Monitor (pid %d) terminated uncleanly?', monitor_pid)
                return
            logger.info('Waiting for logcat process to terminate.')
            i += 1
            if i >= 10:
                logger.warning('Monitor pid did not terminate. Continuing anyway.')
                return
    except (ValueError, IOError, OSError):
        logger.exception('Error signaling logcat monitor - continuing')
def stop(self):
    """
    Stop the daemon
    """
    # Get the pid from the pidfile
    try:
        pf = file(self.pidfile, 'r')
        pid = int(pf.read().strip())
        pf.close()
    except IOError:
        pid = None

    if not pid:
        message = "pidfile %s does not exist. Daemon not running?\n"
        sys.stderr.write(message % self.pidfile)
        return  # not an error in a restart

    # Try killing the daemon process
    try:
        while 1:
            os.kill(pid, SIGTERM)
            time.sleep(0.1)
    except OSError, err:
        err = str(err)
        if err.find("No such process") > 0:
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)
        else:
            print str(err)
            sys.exit(1)
def stop(self):
    try:
        pf = file(self.pidfile, 'r')
        pid = int(pf.read().strip())
        pf.close()
    except IOError:
        pid = None

    if not pid:
        message = "pidfile %s does not exist. Daemon not running?\n"
        sys.stderr.write(message % self.pidfile)
        return  # not an error in a restart

    print("killing process with pid {0}".format(pid))
    try:
        while 1:
            os.kill(pid, SIGTERM)
            time.sleep(0.1)
    except OSError, err:
        err = str(err)
        if err.find("No such process") > 0:
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)
        else:
            print str(err)
            sys.exit(1)
def interruption():
    """
    Ignore SIGINT/SIGTERM in kas, let them be handled by our sub-processes
    """
    pass
def test_EventAppPortConnectionSIGTERMNoPersist(self):
    self.localEvent = threading.Event()
    self.eventFlag = False

    self._nb_domMgr, domMgr = self.launchDomainManager("--nopersist", endpoint="giop:tcp::5679", dbURI=self._dbfile)
    self._nb_devMgr, devMgr = self.launchDeviceManager("/nodes/test_EventPortTestDevice_node/DeviceManager.dcd.xml")

    domainName = scatest.getTestDomainName()
    domMgr.installApplication("/waveforms/PortConnectFindByDomainFinderEvent/PortConnectFindByDomainFinderEvent.sad.xml")
    appFact = domMgr._get_applicationFactories()[0]
    app = appFact.create(appFact._get_name(), [], [])
    app.start()

    # Kill the domainMgr
    os.kill(self._nb_domMgr.pid, signal.SIGTERM)
    if not self.waitTermination(self._nb_domMgr, 5.0):
        self.fail("Domain Manager Failed to Die")

    # Restart the Domain Manager (which should restore the old channel)
    self._nb_domMgr, domMgr = self.launchDomainManager("--nopersist", endpoint="giop:tcp::5679", dbURI=self._dbfile)

    newappFact = domMgr._get_applicationFactories()
    self.assertEqual(len(newappFact), 0)

    apps = domMgr._get_applications()
    self.assertEqual(len(apps), 0)

    devMgrs = domMgr._get_deviceManagers()
    self.assertEqual(len(devMgrs), 0)
def test_DeviceManagerDisappear(self):
    self._nb_domMgr, self._domMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile)
    self._nb_devMgr, devMgr = self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml")

    self.assertEqual(len(self._domMgr._get_applicationFactories()), 0)
    self.assertEqual(len(self._domMgr._get_applications()), 0)
    self._domMgr.installApplication("/waveforms/CommandWrapper/CommandWrapper.sad.xml")
    self.assertEqual(len(self._domMgr._get_applicationFactories()), 1)
    self.assertEqual(len(self._domMgr._get_applications()), 0)

    # Ensure the expected device is available
    self.assertNotEqual(devMgr, None)
    self.assertEqual(len(devMgr._get_registeredDevices()), 1)
    device = devMgr._get_registeredDevices()[0]

    # Kill the domainMgr and device manager
    os.kill(self._nb_domMgr.pid, signal.SIGKILL)
    if not self.waitTermination(self._nb_domMgr):
        self.fail("Domain Manager Failed to Die")

    os.kill(self._nb_devMgr.pid, signal.SIGTERM)
    if not self.waitTermination(self._nb_devMgr):
        self.fail("Device Manager Failed to Die")

    # Start the domainMgr again
    self._nb_domMgr, newDomMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile)

    # Verify our client reference still is valid
    self.assertEqual(False, newDomMgr._non_existent())
    self.assertEqual(newDomMgr._get_identifier(), 'DCE:5f52f645-110f-4142-8cc9-4d9316ddd958')
    self.assertEqual(self._domMgr._get_identifier(), 'DCE:5f52f645-110f-4142-8cc9-4d9316ddd958')
    self.assertEqual(False, self._domMgr._non_existent())
    self.assertEqual(len(self._domMgr._get_deviceManagers()), 0)
    self.assertEqual(len(self._domMgr._get_applicationFactories()), 1)
def test_ApplicationUsesDevice(self):
    self._nb_domMgr, self._domMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile)
    self._nb_devMgr, devMgr = self.launchDeviceManager("/nodes/test_SADUsesDevice/DeviceManager.dcd.xml")

    self._domMgr.installApplication("/waveforms/SADUsesDeviceWave/SADUsesDeviceWaveExternalSimple.sad.xml")
    appFact = self._domMgr._get_applicationFactories()[0]
    app = appFact.create(appFact._get_name(), [], [])

    # Make sure that the allocation was made to the device
    prop = CF.DataType(id='simple_alloc', value=any.to_any(None))
    for dev in devMgr._get_registeredDevices():
        if dev._get_label() == 'SADUsesDevice_1':
            allocRes = dev.query([prop])
            self.assertEquals(allocRes[0].value.value(), 8)

    # Kill the domainMgr
    os.kill(self._nb_domMgr.pid, signal.SIGTERM)
    # TODO if SIGKILL is used (simulating a nodeBooter unexpected abort,
    # the IOR and the newly spawned domain manager do not work
    if not self.waitTermination(self._nb_domMgr):
        self.fail("Domain Manager Failed to Die")

    # Start the domainMgr again
    self._nb_domMgr, newDomMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile)

    # Capacity still allocated to device
    prop = CF.DataType(id='simple_alloc', value=any.to_any(None))
    for dev in devMgr._get_registeredDevices():
        if dev._get_label() == 'SADUsesDevice_1':
            allocRes = dev.query([prop])
            self.assertEquals(allocRes[0].value.value(), 8)

    # Release app to free up device capacity to make sure usesdevicecapacties was properly restored
    newApp = newDomMgr._get_applications()[0]
    newApp.releaseObject()
    prop = CF.DataType(id='simple_alloc', value=any.to_any(None))
    for dev in devMgr._get_registeredDevices():
        if dev._get_label() == 'SADUsesDevice_1':
            allocRes = dev.query([prop])
            self.assertEquals(allocRes[0].value.value(), 10)
def test_ApplicationStartOrder(self):
    self._nb_domMgr, self._domMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile)
    self._nb_devMgr, devMgr = self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml")

    self._domMgr.installApplication("/waveforms/CommandWrapperStartOrderTests/CommandWrapperWithOrder.sad.xml")
    appFact = self._domMgr._get_applicationFactories()[0]
    app = appFact.create(appFact._get_name(), [], [])
    app.start()
    comps = app._get_registeredComponents()
    for c in comps:
        self.assertEquals(c.componentObject._get_started(), True)

    # Kill the domainMgr
    os.kill(self._nb_domMgr.pid, signal.SIGTERM)
    # TODO if SIGKILL is used (simulating a nodeBooter unexpected abort,
    # the IOR and the newly spawned domain manager do not work
    if not self.waitTermination(self._nb_domMgr):
        self.fail("Domain Manager Failed to Die")

    # Start the domainMgr again
    self._nb_domMgr, newDomMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile)

    # Components should all still be started
    for c in comps:
        self.assertEquals(c.componentObject._get_started(), True)

    # Stop application to make sure that start order Resource variables were recovered properly
    app = newDomMgr._get_applications()[0]
    app.stop()
    for c in comps:
        self.assertEquals(c.componentObject._get_started(), False)

    # Start components to make sure that start also works
    app.start()
    for c in comps:
        self.assertEquals(c.componentObject._get_started(), True)
def test_RegisteredDomains(self):
    nb, domMgr = self.launchDomainManager(endpoint='giop:tcp::5679', dbURI=self._dbfile)

    testMgr1 = TestDomainManager('test1')
    domMgr.registerRemoteDomainManager(testMgr1._this())
    testMgr2 = TestDomainManager('test2')
    domMgr.registerRemoteDomainManager(testMgr2._this())

    remotes = [r._get_identifier() for r in domMgr._get_remoteDomainManagers()]
    self.assertEqual(len(remotes), 2)
    self.assert_(testMgr1._get_identifier() in remotes)
    self.assert_(testMgr2._get_identifier() in remotes)

    # Kill the DomainManager
    os.kill(nb.pid, signal.SIGTERM)
    if not self.waitTermination(nb):
        self.fail("Domain Manager Failed to Die")

    # Deactivate the second domain manager to check that its connection is
    # not restored
    poa = testMgr2._default_POA()
    oid = poa.servant_to_id(testMgr2)
    poa.deactivate_object(oid)

    # Re-launch and check that the remote domain is restored
    nb, domMgr = self.launchDomainManager(endpoint='giop:tcp::5679', dbURI=self._dbfile)

    remotes = domMgr._get_remoteDomainManagers()
    self.assertEqual(len(remotes), 1)
    self.assertEqual(remotes[0]._get_identifier(), testMgr1._get_identifier())
def test_Allocations(self):
    nb, domMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile)
    self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml")

    # Make a couple of different allocations
    allocMgr = domMgr._get_allocationMgr()
    memCapacityId = 'DCE:8dcef419-b440-4bcf-b893-cab79b6024fb'
    bogoMipsId = 'DCE:5636c210-0346-4df7-a5a3-8fd34c5540a8'
    nicCapacityId = 'DCE:4f9a57fc-8fb3-47f6-b779-3c2692f52cf9'
    allocations = {'test1': {memCapacityId: 2048, nicCapacityId: 0.125},
                   'test2': {bogoMipsId: 10000}}
    requests = [CF.AllocationManager.AllocationRequestType(k, properties.props_from_dict(v), [], [], 'test_Allocations')
                for k, v in allocations.iteritems()]
    results = allocMgr.allocate(requests)
    self.assertEqual(len(results), len(requests))

    # Save the allocation state prior to termination
    pre = dict((al.allocationID, al) for al in allocMgr.allocations([]))
    self.assertEqual(len(pre), len(results))

    # Kill the DomainManager
    os.kill(nb.pid, signal.SIGTERM)
    if not self.waitTermination(nb):
        self.fail("Domain Manager Failed to Die")

    # Re-launch and check that the allocation state remains the same;
    # implicitly tests that the AllocationManager reference is persistent
    self.launchDomainManager(endpoint='giop:tcp::5679', dbURI=self._dbfile)

    post = dict((al.allocationID, al) for al in allocMgr.allocations([]))
    self.assertEqual(len(pre), len(post))
    self.assertEqual(pre.keys(), post.keys())
    for allocId, status in pre.iteritems():
        self._compareAllocation(status, post[allocId])
def test_AllocationPersistence(self):
    self.launchDeviceManager("/nodes/test_multiDomain_exec/DeviceManager.dcd.xml", domainManager=self._domainManager_1)
    self.launchDeviceManager("/nodes/test_multiDomain_uses/DeviceManager.dcd.xml", domainManager=self._domainManager_2)

    self._domainManager_1.registerRemoteDomainManager(self._domainManager_2)

    allocMgr_1 = self._domainManager_1._get_allocationMgr()

    # Make a couple of allocation requests that we know will have to be
    # split across the two domains
    execcap = {'DCE:8dcef419-b440-4bcf-b893-cab79b6024fb': 1000,
               'DCE:4f9a57fc-8fb3-47f6-b779-3c2692f52cf9': 50.0}
    usescap = {'DCE:8cad8ca5-c155-4d1d-ae40-e194aa1d855f': 1}
    requests = [allocMgrHelpers.createRequest('exec', properties.props_from_dict(execcap)),
                allocMgrHelpers.createRequest('uses', properties.props_from_dict(usescap))]
    results = dict((r.requestID, r) for r in allocMgr_1.allocate(requests))
    self.assertEqual(len(requests), len(results))
    usesId = results['uses'].allocationID
    execId = results['exec'].allocationID

    # Save the current allocation state
    pre = dict((al.allocationID, al) for al in allocMgr_1.allocations([]))

    # Kill the DomainManager
    os.kill(self._domainBooter_1.pid, signal.SIGTERM)
    if not self.waitTermination(self._domainBooter_1):
        self.fail("Domain Manager Failed to Die")

    # Re-launch and check that the allocation state remains the same
    self.launchDomainManager(endpoint='giop:tcp::5679', dbURI=self._dbfile)

    post = dict((al.allocationID, al) for al in allocMgr_1.allocations([]))
    self.assertEqual(len(pre), len(post))
    self.assertEqual(pre.keys(), post.keys())
    for allocId, status in pre.iteritems():
        self.assert_(allocMgrHelpers.compareAllocationStatus(status, post[allocId]))
def terminateChildrenPidOnly(self, pid, signals=(signal.SIGINT, signal.SIGTERM)):
    ls = commands.getoutput('ls /proc')
    entries = ls.split('\n')
    for entry in entries:
        filename = '/proc/' + entry + '/status'
        try:
            fp = open(filename, 'r')
            stuff = fp.readlines()
            fp.close()
        except:
            continue
        ret = ''
        for line in stuff:
            if 'PPid' in line:
                ret = line
                break
        if ret != '':
            parentPid = ret.split('\t')[-1][:-1]
            if parentPid == pid:
                self.terminateChildrenPidOnly(entry, signals)
    filename = '/proc/' + pid + '/status'
    for sig in signals:
        try:
            os.kill(int(pid), sig)
        except:
            continue
        done = False
        attemptCount = 0
        while not done:
            try:
                fp = open(filename, 'r')
                fp.close()
                attemptCount += 1
                if attemptCount == 10:
                    break
                time.sleep(0.1)
            except:
                done = True
        if not done:
            continue
def terminateChildren(self, child, signals=(signal.SIGINT, signal.SIGTERM)):
    ls = commands.getoutput('ls /proc')
    entries = ls.split('\n')
    for entry in entries:
        filename = '/proc/' + entry + '/status'
        try:
            fp = open(filename, 'r')
            stuff = fp.readlines()
        except:
            continue
        for line in stuff:
            if 'PPid' in line:
                ret = line
                break
        if ret != '':
            parentPid = int(ret.split('\t')[-1][:-1])
            if parentPid == child.pid:
                self.terminateChildrenPidOnly(entry, signals)
    if child.poll() != None:
        return
    try:
        for sig in signals:
            os.kill(child.pid, sig)
            if self.waitTermination(child):
                break
        child.wait()
    except OSError:
        pass
def __terminate_process(process, signals=(_signal.SIGINT, _signal.SIGTERM, _signal.SIGKILL)):
    if process and process.poll() != None:
        return
    try:
        for sig in signals:
            _os.kill(process.pid, sig)
            if __waitTermination(process):
                break
        process.wait()
    except OSError, e:
        pass
    finally:
        pass
def setUp(self):
    """
    Starts the component whose id matches the one stored in the IMPL_ID
    """
    signal.signal(signal.SIGINT, self.tearDown)
    signal.signal(signal.SIGTERM, self.tearDown)
    signal.signal(signal.SIGQUIT, self.tearDown)

    global SOFT_PKG
    global IMPL_ID
    self.comp_obj = None
    self.comp = None

    # Use the globals by default
    self.impl = IMPL_ID
    self.spd_file = SOFT_PKG

    self.spd = SPDParser.parse(SOFT_PKG)

    try:
        self.prf_file = self.spd.get_propertyfile().get_localfile().get_name()
        if (self.prf_file[0] != '/'):
            self.prf_file = os.path.join(os.path.dirname(self.spd_file), self.prf_file)
        self.prf = PRFParser.parse(self.prf_file)
    except:
        self.prf_file = None
        self.prf = None

    self.scd_file = self.spd.get_descriptor().get_localfile().get_name()
    if (self.scd_file[0] != '/'):
        self.scd_file = os.path.join(os.path.dirname(self.spd_file), self.scd_file)
    self.scd = SCDParser.parse(self.scd_file)

    # create a map between prop ids and names
    if self.prf:
        self._props = prop_helpers.getPropNameDict(self.prf)
def __init__(self):
    helperBase.__init__(self)
    self.usesPortIORString = None
    self._providesPortDict = {}
    self._processes = {}
    self._STOP_SIGNALS = ((_signal.SIGINT, 1),
                          (_signal.SIGTERM, 5),
                          (_signal.SIGKILL, None))
def pause_job(self, job):
    # This signal will cause the refresh runner to update
    # the job to paused
    os.kill(job.pid, signal.SIGTERM)
    job.pid = None
def test_server_signals(self):
    server = spoon.server.TCPSpoon(("0.0.0.0", 30783))
    calls = [
        call(signal.SIGUSR1, server.reload_handler),
        call(signal.SIGTERM, server.shutdown_handler)
    ]
    self.mock_signal.assert_has_calls(calls)
def test_master_shutdown(self):
    shutdown = patch("spoon.server.TCPSpoon.shutdown").start()
    server = spoon.server.TCPSpork(("0.0.0.0", 30783))
    server.prefork = 2
    server.pids = [100, 101]
    server.shutdown()
    calls = [
        call(100, signal.SIGTERM),
        call(101, signal.SIGTERM),
    ]
    self.mock_kill.assert_has_calls(calls)
def send_action(action, pidfile, logger=None):
    """Send a signal to an existing running daemon."""
    if logger is None:
        logger = logging
    if not os.path.exists(pidfile):
        logger.critical("No pid file available: %s", pidfile)
        return
    with open(pidfile) as pidf:
        pid = int(pidf.read())
        if action == "reload":
            os.kill(pid, signal.SIGUSR1)
        elif action == "stop":
            os.kill(pid, signal.SIGTERM)
def shutdown_handler(self, *args, **kwargs):
    """Handler for the SIGTERM signal.

    This should be used to kill the daemon and ensure proper clean-up.
    """
    self.log.info("SIGTERM received. Shutting down.")
    t = threading.Thread(target=self.shutdown)
    t.start()
def send_signal(self, sig):
    """Send a signal to the process
    """
    if sig == signal.SIGTERM:
        self.terminate()
    elif sig == signal.CTRL_C_EVENT:
        os.kill(self.pid, signal.CTRL_C_EVENT)
    elif sig == signal.CTRL_BREAK_EVENT:
        os.kill(self.pid, signal.CTRL_BREAK_EVENT)
    else:
        raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
    """Terminate the process with SIGTERM
    """
    self.send_signal(signal.SIGTERM)
def terminate(self):
    if self.returncode is None:
        try:
            os.kill(self.pid, signal.SIGTERM)
        except OSError, e:
            if self.wait(timeout=0.1) is None:
                raise
def wait(self, timeout=None):
    if self.returncode is None:
        if timeout is None:
            msecs = _subprocess.INFINITE
        else:
            msecs = max(0, int(timeout * 1000 + 0.5))

        res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
        if res == _subprocess.WAIT_OBJECT_0:
            code = _subprocess.GetExitCodeProcess(self._handle)
            if code == TERMINATE:
                code = -signal.SIGTERM
            self.returncode = code

    return self.returncode
def setup_docker_sigterm_handler():
    # type: () -> None
    '''
    'manage.py runserver' is not set up to deal with a SIGTERM signal,
    and instead expects a Ctrl-C to come to its child process. So we'll
    add a SIGTERM handler here that finds all our children and
    gracefully shuts them down, which provides a quick graceful exit
    from Docker.
    '''

    def get_children():
        # type: () -> Iterator[int]
        output = subprocess.check_output(
            "ps --ppid=%d -o pid | awk 'NR>1' | xargs echo" % os.getpid(),
            shell=True
        )
        return map(int, output.split())

    def handler(signum, frame):
        # type: (int, Any) -> None
        for child_pid in get_children():
            try:
                os.kill(child_pid, signal.SIGTERM)
                os.waitpid(child_pid, 0)
            except OSError:
                pass
        sys.exit(0)

    info("Setting up Docker SIGTERM handler for quick, graceful exit.")
    signal.signal(signal.SIGTERM, handler)