Python resource module: RLIM_INFINITY example source code
The following code examples, extracted from open-source Python projects, illustrate how to use resource.RLIM_INFINITY.
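Before the project snippets, here is a minimal, self-contained sketch of the pattern most of them share: query a limit with resource.getrlimit() and treat resource.RLIM_INFINITY as "unlimited", falling back to a finite default. The fallback value 2048 is an illustrative assumption standing in for the MAXFD constant that several snippets reference; the resource module itself is only available on Unix-like systems.

import resource

FALLBACK_MAXFD = 2048  # illustrative stand-in for the snippets' MAXFD constant

def max_open_files():
    # getrlimit returns (soft, hard); RLIM_INFINITY means "no limit",
    # so it cannot be used directly as an upper bound for iteration.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if hard == resource.RLIM_INFINITY:
        return FALLBACK_MAXFD
    return hard

print(max_open_files())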
def get_maximum_file_descriptors():
    """ Get the maximum number of open file descriptors for this process.

        :return: The number (integer) to use as the maximum number of open
            files for this process.

        The maximum is the process hard resource limit of maximum number of
        open file descriptors. If the limit is "infinity", a default value
        of ``MAXFD`` is returned.
        """
    limits = resource.getrlimit(resource.RLIMIT_NOFILE)
    result = limits[1]
    if result == resource.RLIM_INFINITY:
        result = MAXFD
    return result
def test_worst_case(self):
    sys.setrecursionlimit(10 ** 7)  # max depth of recursion
    resource.setrlimit(resource.RLIMIT_STACK,
                       (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
    # Worst case is a tree with one child per node, that is, a list
    test_input = [-1 if x == 0 else x - 1 for x in range(10 ** 5)]
    tree = tree_height.TreeHeight(len(test_input), test_input)
    assert len(test_input) == tree.compute_height()
# def test_provided_input_data(self):
#     files_wildcard = os.path.dirname(__file__) + "/tree_height_test_input/*"
#     files = glob.glob(files_wildcard)
#     for file in files:
#         with open(file) as f:
#             size = f.readline()
#             parent = f.readline()
#             test_input = [int(x) for x in parent.split()]
#             tree = tree_height.TreeHeight(len(test_input), test_input)
def close_open_files():
    '''Closes all open files. Useful after a fork.'''
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = MAXFD
    for fd in reversed(range(maxfd)):
        try:
            os.close(fd)
        except OSError as e:
            if e.errno == errno.EBADF:
                pass  # File not open
            else:
                raise Exception("Failed to close file descriptor %d: %s" % (fd, e))
def _setup_env(self):
    prefix = "/tmp/tracer_"
    curdir = os.getcwd()
    tmpdir = tempfile.mkdtemp(prefix=prefix)

    # allow cores to be dumped
    saved_limit = resource.getrlimit(resource.RLIMIT_CORE)
    resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))

    binaries_old = []
    for binary in self._binaries:
        binaries_old.append(binary)

    binary_replacements = []
    for i, binary in enumerate(self._binaries):
        binary_replacements.append(os.path.join(tmpdir, "binary_replacement_%d" % i))

    for binary_o, binary_r in zip(binaries_old, binary_replacements):
        shutil.copy(binary_o, binary_r)

    self._binaries = binary_replacements
    if self.argv is not None and not self.is_multicb:
        self.argv = self._binaries + self.argv[1:]

    os.chdir(tmpdir)
    try:
        yield (tmpdir, binary_replacements)
    finally:
        assert tmpdir.startswith(prefix)
        shutil.rmtree(tmpdir)
        os.chdir(curdir)
        resource.setrlimit(resource.RLIMIT_CORE, saved_limit)
        self._binaries = binaries_old
def get_maxfd():
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = MAXFD
    return maxfd
def test_above_fd_setsize(self):
    # A scalable implementation should have no problem with more than
    # FD_SETSIZE file descriptors. Since we don't know the value, we just
    # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if hard == resource.RLIM_INFINITY:
        self.skipTest("RLIMIT_NOFILE is infinite")

    try:  # If we're on a *BSD system, the limit tag is different.
        _, bsd_hard = resource.getrlimit(resource.RLIMIT_OFILE)
        if bsd_hard == resource.RLIM_INFINITY:
            self.skipTest("RLIMIT_OFILE is infinite")
        if bsd_hard < hard:
            hard = bsd_hard
    # NOTE: AttributeError is raised when resource.RLIMIT_OFILE is not defined (e.g. on Mac OS).
    except (OSError, resource.error, AttributeError):
        pass

    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
        self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
                        (soft, hard))
        limit_nofile = min(hard, 2 ** 16)
    except (OSError, ValueError):
        limit_nofile = soft

    # Guard against already allocated FDs
    limit_nofile -= 256
    limit_nofile = max(0, limit_nofile)

    s = self.make_selector()

    for i in range(limit_nofile // 2):
        rd, wr = self.make_socketpair()
        s.register(rd, selectors2.EVENT_READ)
        s.register(wr, selectors2.EVENT_WRITE)

    self.assertEqual(limit_nofile // 2, len(s.select()))
def _setup_env(self):
    prefix = "/dev/shm/tracer_"
    curdir = os.getcwd()
    tmpdir = tempfile.mkdtemp(prefix=prefix)

    # don't prefilter the core
    if len(self.binaries) > 1:
        with open("/proc/self/coredump_filter", "wb") as f:
            f.write("00000077")

    # allow cores to be dumped
    saved_limit = resource.getrlimit(resource.RLIMIT_CORE)
    resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))

    binaries_old = []
    for binary in self.binaries:
        binaries_old.append(os.path.abspath(binary))

    self.binaries = list(binaries_old)

    os.chdir(tmpdir)
    try:
        yield (tmpdir, self.binaries[0])
    finally:
        assert tmpdir.startswith(prefix)
        shutil.rmtree(tmpdir)
        os.chdir(curdir)
        resource.setrlimit(resource.RLIMIT_CORE, saved_limit)
        self.binaries = binaries_old
def __init__(self, nvim):
    Base.__init__(self, nvim)

    self._snippet_engine = nvim.vars['cm_completed_snippet_engine']

    # workaround for #62
    try:
        import resource
        import psutil
        mem = psutil.virtual_memory()
        resource.setrlimit(resource.RLIMIT_DATA, (mem.total / 3, resource.RLIM_INFINITY))
    except Exception as ex:
        logger.exception("set RLIMIT_DATA failed. %s", ex)
def raise_limits():
    import resource
    try:
        _, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        info("Current limits, soft and hard : {} {}".format(_, hard))
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
    except ValueError:
        error("Exceeds limit {}, infinity is {}".format(hard, resource.RLIM_INFINITY))
    except resource.error:
        return False
    except OSError as e:
        critical('You may need to check ulimit parameter: {}'.format(e))
        raise e
    return True
def _autoclose_files(shielded=None, fallback_limit=1024):
    ''' Automatically close any open file descriptors.

    shielded is an iterable of file descriptors to leave open.
    '''
    # Process shielded.
    shielded = default_to(shielded, [])

    # Figure out the maximum number of files to try to close.
    # This returns a tuple of softlimit, hardlimit; the hardlimit is always
    # greater.
    softlimit, hardlimit = resource.getrlimit(resource.RLIMIT_NOFILE)

    # If the hard limit is infinity, we can't iterate to it.
    if hardlimit == resource.RLIM_INFINITY:
        # Check the soft limit. If it's also infinity, fall back to the guess.
        if softlimit == resource.RLIM_INFINITY:
            fdlimit = fallback_limit
        # The soft limit is finite, so fall back to that.
        else:
            fdlimit = softlimit
    # The hard limit is not infinity, so prefer it.
    else:
        fdlimit = hardlimit

    # Skip fd 0, 1, 2, which are used by stdin, stdout, and stderr
    # (respectively)
    ranges_to_close = _make_range_tuples(
        start=3,
        stop=fdlimit,
        exclude=shielded
    )
    for start, stop in ranges_to_close:
        # How nice of os to include this for us!
        os.closerange(start, stop)
def setupEnvironment(config):
    """
    Prepare the environment before the server is started.
    For example ASAN options, working directory, ASLR and ulimit.
    """
    # Silence warnings from the ptrace library
    #logging.getLogger().setLevel(logging.ERROR)

    # Most important is to set log_path so we have access to the asan logs
    asanOpts = ""
    asanOpts += "color=never:verbosity=0:leak_check_at_exit=false:"
    asanOpts += "abort_on_error=true:log_path=" + config["temp_dir"] + "/asan"
    os.environ["ASAN_OPTIONS"] = asanOpts

    # Tell Glibc to abort on heap corruption but not dump a bunch of output
    os.environ["MALLOC_CHECK_"] = "2"

    # Check ASLR status
    if config["ignore_aslr_status"] is False:
        aslrStatusFile = "/proc/sys/kernel/randomize_va_space"
        d = ""
        with open(aslrStatusFile, "r") as f:
            d = f.read()
        config["env_aslr_status"] = d
        # Compare the file content by value (the file ends with a newline).
        if "disable_aslr_check" not in config and d.strip() != "0":
            logging.error("ASLR Enabled, please disable it:")
            logging.error("  echo 0 | sudo tee /proc/sys/kernel/randomize_va_space")
            sys.exit(1)

    # set resources
    # core file:
    resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))

    # set working directory
    # TODO FIXME
def set_time_limit(time_limit):
    if time_limit is None:
        return
    assert can_set_limits()
    # Don't try to raise the hard limit.
    _, external_hard_limit = resource.getrlimit(resource.RLIMIT_CPU)
    if external_hard_limit == resource.RLIM_INFINITY:
        external_hard_limit = float("inf")
    assert time_limit <= external_hard_limit, (time_limit, external_hard_limit)
    # Soft limit reached --> SIGXCPU.
    # Hard limit reached --> SIGKILL.
    soft_limit, hard_limit = _get_soft_and_hard_time_limits(
        time_limit, external_hard_limit)
    _set_limit(resource.RLIMIT_CPU, soft_limit, hard_limit)
def _get_external_limit(kind):
    if not can_set_limits():
        return None
    # Limits are either positive values or -1 (RLIM_INFINITY).
    soft, hard = resource.getrlimit(kind)
    if soft != resource.RLIM_INFINITY:
        return soft
    elif hard != resource.RLIM_INFINITY:
        return hard
    else:
        return None
def callInteractive(cmd, replace=True, search_path=False, *args, **kwargs):
    if replace:
        if args or kwargs:
            _log.errorExit(
                'Extra args are not supported for replace call')

        sys = _ext.sys
        subprocess = _ext.subprocess
        os = _ext.os

        _log.infoLabel('Exec: ', subprocess.list2cmdline(cmd))

        sys.stdout.flush()
        sys.stderr.flush()

        # There is a problem of leftover FDs in Python 2
        #---
        import resource
        maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
        if maxfd == resource.RLIM_INFINITY:
            maxfd = 10240
        for fd in range(3, maxfd):
            try:
                os.close(fd)
            except OSError:
                pass
        #---

        if search_path:
            os.execvp(cmd[0], cmd)
        else:
            os.execv(cmd[0], cmd)
    else:
        callExternal(cmd, user_interaction=True, *args, **kwargs)
def daemonize(
        work_dir=None, chroot_dir=None,
        umask=None, uid=None, gid=None,
        pidfile=None, files_preserve=[], signals={}):
    # Dirs, limits, users
    if chroot_dir is not None:
        os.chdir(chroot_dir)
        os.chroot(chroot_dir)
    if umask is not None:
        os.umask(umask)
    if work_dir is not None:
        os.chdir(work_dir)
    if gid is not None:
        os.setgid(gid)
    if uid is not None:
        os.setuid(uid)

    # Doublefork, split session
    if os.fork() > 0:
        os._exit(0)
    os.setsid()
    if os.fork() > 0:
        os._exit(0)

    # Setup signal handlers
    for (signum, handler) in signals.items():
        signal.signal(signum, handler)

    # Close descriptors
    descr_preserve = set(f.fileno() for f in files_preserve)
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = 65535
    for fd in range(maxfd, 3, -1):  # 3 means omit stdin, stdout, stderr
        if fd not in descr_preserve:
            try:
                os.close(fd)
            except Exception:
                pass

    # Redirect stdin, stdout, stderr to /dev/null
    devnull = os.open(os.devnull, os.O_RDWR)
    for fd in range(3):
        os.dup2(devnull, fd)

    # PID file
    if pidfile is not None:
        pidd = os.open(pidfile, os.O_RDWR | os.O_CREAT | os.O_EXCL | os.O_TRUNC)
        os.write(pidd, str(os.getpid()) + "\n")
        os.close(pidd)

        # Define and setup atexit closure
        @atexit.register
        def unlink_pid():
            try:
                os.unlink(pidfile)
            except Exception:
                pass
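A hedged usage sketch for the daemonize() helper above. The log file path, pidfile path, and signal handler are illustrative assumptions, not part of the original project; the call simply exercises the parameters the function signature already exposes.

import signal
import sys

def _handle_term(signum, frame):
    # illustrative handler: exit cleanly on SIGTERM
    sys.exit(0)

log_file = open("/tmp/mydaemon.log", "a")  # hypothetical log destination to keep open
daemonize(
    work_dir="/",                          # avoid pinning a removable mount point
    umask=0o022,
    pidfile="/tmp/mydaemon.pid",           # hypothetical pidfile path
    files_preserve=[log_file],             # shield the log fd from the close loop
    signals={signal.SIGTERM: _handle_term},
)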