The following 50 code examples, extracted from open-source Python projects, illustrate how to use fcntl.LOCK_NB.
def __enter__(self):
    """Acquire an exclusive, non-blocking lock on the pid file and write our PID.

    Returns the open pid-file handle (or the existing one when no path is
    configured). Exits via SystemExit if another process holds the lock.
    """
    # No pid-file path configured: nothing to lock.
    if self.path is None:
        return self.pidfile
    # "a+" so an existing file is not truncated before we own the lock.
    self.pidfile = open(self.path, "a+")
    try:
        fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        # Lock held elsewhere: another instance is already running.
        self.pidfile = None
        raise SystemExit("Already running according to " + self.path)
    # We own the lock: replace any stale contents with our PID.
    self.pidfile.seek(0)
    self.pidfile.truncate()
    self.pidfile.write(str(os.getpid()))
    self.pidfile.flush()
    self.pidfile.seek(0)
    return self.pidfile
def do_magic(self):
    """Single-instance check: create/lock LOCK_PATH, set self.is_running if taken.

    On Windows the O_EXCL create acts as the lock; on POSIX an exclusive
    non-blocking lockf is used instead.
    """
    if OS_WIN:
        try:
            if os.path.exists(LOCK_PATH):
                os.unlink(LOCK_PATH)
            self.fh = os.open(LOCK_PATH, os.O_CREAT | os.O_EXCL | os.O_RDWR)
        except EnvironmentError as err:
            # errno 13 == EACCES: the file is held open by the other instance.
            if err.errno == 13:
                self.is_running = True
            else:
                raise
    else:
        try:
            self.fh = open(LOCK_PATH, 'w')
            fcntl.lockf(self.fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except EnvironmentError as err:
            # If open() succeeded but lockf failed, another instance holds
            # the lock; an open() failure itself is unexpected, so re-raise.
            if self.fh is not None:
                self.is_running = True
            else:
                raise
def open(self):
    """Open the shelve cache, read-write if we can take the lock, else read-only.

    Holding the .lock file exclusively selects mode 'c' (create/write);
    failing to lock falls back to mode 'r'. A corrupted writable cache is
    dropped and recreated.
    """
    if self.opened:
        return
    self.lock = open(SETTINGS.CACHE_PATH + '.lock', 'ab')
    try:
        fcntl.flock(self.lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
        mode = 'c'
    except IOError:
        # Another process owns the cache; we can still read it.
        logger.warn("Cache locked, using read-only")
        mode = 'r'
        self.lock.close()
        self.lock = None
    try:
        self.storage = shelve.open(SETTINGS.CACHE_PATH, mode)
    except Exception as e:
        # Only the lock holder (mode 'c') may recover by truncating;
        # read-only openers must propagate the failure.
        if mode != 'c':
            raise
        logger.warn("Dropping corrupted cache on %s", e)
        self.lock.truncate(0)
        self.storage = shelve.open(SETTINGS.CACHE_PATH, mode)
    self.opened = True
def lock_method(lock_filename):
    '''
    Use an OS lock such that a method can only be called once at a time.

    The decorated function raises SystemExit if another process currently
    holds the lock; otherwise the lock is held for the duration of the call.
    '''
    def decorator(func):
        @functools.wraps(func)
        def lock_and_run_method(*args, **kwargs):
            # Snippet based on
            # http://linux.byexamples.com/archives/494/how-can-i-avoid-running-a-python-script-multiple-times-implement-file-locking/
            fp = open(lock_filename, 'w')
            try:
                try:
                    fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
                except IOError:
                    raise SystemExit(
                        "This program is already running. Please stop the current process or " +
                        "remove " + lock_filename + " to run this script."
                    )
                return func(*args, **kwargs)
            finally:
                # Fix: close (and thereby release) the lock file deterministically.
                # The original left `fp` to be reclaimed by garbage collection,
                # which is not guaranteed to be prompt on all interpreters.
                fp.close()
        return lock_and_run_method
    return decorator
def store(proxys):
    """Append proxy string(s) to the shared proxy file under an exclusive lock.

    ``proxys`` may be a single string or a list of strings; one line is
    written per proxy. Retries up to 10 times (3 s apart) if another process
    holds the lock, then gives up silently.
    """
    pidfile = open(proxyFilePath, "a")
    try:
        for _attempt in range(10):
            try:
                # LOCK_EX: exclusive lock; LOCK_NB: fail immediately instead
                # of blocking when another process already holds the lock.
                # (The original comments here were mojibake; rewritten.)
                fcntl.flock(pidfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except (IOError, OSError):
                # Fix: narrowed the bare `except` to lock failures only.
                time.sleep(3)
                continue
            # Fix: isinstance() instead of `type(proxys) == type([])`.
            entries = proxys if isinstance(proxys, list) else [proxys]
            for proxy in entries:
                pidfile.write(proxy + '\n')
            break
    finally:
        # Fix: always close the handle. The original leaked it whenever all
        # 10 attempts failed (it only closed on the success path).
        pidfile.close()
def lock_path(path, timeout=0):
    """Acquire an exclusive flock on *path*, polling until *timeout* seconds.

    Returns a zero-argument callable that releases the lock and closes the
    descriptor. Raises LockError (defined elsewhere in this module) when the
    lock cannot be obtained within the timeout.
    """
    fd = os.open(path, os.O_CREAT)
    # Mark the descriptor close-on-exec so children don't inherit the lock fd.
    flags = fcntl.fcntl(fd, fcntl.F_GETFD, 0)
    flags |= fcntl.FD_CLOEXEC
    fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    started = time.time()
    while True:
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            # Still locked elsewhere: give up once the deadline has passed.
            if started < time.time() - timeout:
                raise LockError("Couldn't obtain lock")
        else:
            break
        time.sleep(0.1)

    def unlock_path():
        # Releasing and closing; safe to call exactly once.
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)

    return unlock_path
def __init__(self, addr, conf, log, fd=None):
    """Create/bind the unix socket, removing a stale socket file first.

    When *fd* is given the socket is inherited and the stale-file check is
    skipped. Raises ValueError if *addr* exists but is not a socket.
    """
    if fd is None:
        try:
            st = os.stat(addr)
        except OSError as e:
            # ENOENT (no stale file) is fine; anything else is a real error.
            if e.args[0] != errno.ENOENT:
                raise
        else:
            if stat.S_ISSOCK(st.st_mode):
                os.remove(addr)
            else:
                raise ValueError("%r is not a socket" % addr)
    self.parent = os.getpid()
    super(UnixSocket, self).__init__(addr, conf, log, fd=fd)
    # each arbiter grabs a shared lock on the unix socket.
    fcntl.lockf(self.sock, fcntl.LOCK_SH | fcntl.LOCK_NB)
def write_pid(path):
    """Write our PID to *path*.

    The file is locked exclusively (non-blocking) while being rewritten.
    On any failure the error is logged and the original exception re-raised.
    """
    try:
        pid = os.getpid()
        # `with` guarantees the file (and therefore the lock) is closed,
        # making the original's manual close-in-finally redundant.
        with io.open(path, mode='w', encoding='utf-8') as pidfile:
            # Get a non-blocking exclusive lock
            fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            pidfile.seek(0)
            pidfile.truncate(0)
            # Fix: unicode() is Python-2-only; str() is correct on Python 3.
            pidfile.write(str(pid))
    except Exception:
        # Fix: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit before re-raising).
        logging.error(_("Could not write PID file: %s") % path)
        raise  # This raises the original exception
def isSingleInstance(flavor_id=""):
    """Return True when this is the only running instance for *flavor_id*.

    A lock file derived from the script path is created in the temp
    directory; the module-level ``fp`` keeps it (and the lock) alive for
    the lifetime of the process.
    """
    global fp
    stem = os.path.splitext(os.path.abspath(sys.argv[0]))[0]
    stem = stem.replace("/", "-").replace(":", "").replace("\\", "-")
    basename = stem + '-%s' % flavor_id + '.lock'
    lockfile = os.path.normpath(tempfile.gettempdir() + '/' + basename)
    if sys.platform == 'win32':
        # On Windows the mere existence of the (held-open) file is the lock:
        # unlink any stale file, then create exclusively.
        try:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
            fp = os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
        except OSError:
            return False
        return True
    # POSIX: take a non-blocking exclusive record lock on the file.
    fp = open(lockfile, 'w')
    try:
        fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        return False
    return True
def acquire(self):
    """Spin until the system-wide I2C mutex file lock is acquired.

    Busy-waits in 1 ms steps while another process holds
    /run/lock/DexterLockI2C. The open handle is kept on self so release()
    elsewhere can drop the lock.
    """
    if self.mutex_debug:
        print("I2C mutex acquire")
    acquired = False
    while not acquired:
        try:
            self.DexterLockI2C_handle = open('/run/lock/DexterLockI2C', 'w')
            # lock
            fcntl.lockf(self.DexterLockI2C_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
            acquired = True
        except IOError:
            # already locked by a different process
            time.sleep(0.001)
        except Exception as e:
            # Unexpected failure (e.g. permissions): report and keep trying.
            print(e)
    if self.mutex_debug:
        print("I2C mutex acquired {}".format(time.time()))
def lock(self):
    """
    Locks the package to avoid concurrent operations on its shared
    resources.
    Currently, the only resource shared among scripts executed from
    different directories is the repository.
    """
    if not self.locking_enabled:
        LOG.debug("This package has no shared resources to lock")
        return
    LOG.debug("Checking for lock on file {}.".format(self.lock_file_path))
    self.lock_file = open(self.lock_file_path, "w")
    try:
        # First try without blocking so we can log who we are waiting for.
        fcntl.lockf(self.lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as exc:
        RESOURCE_UNAVAILABLE_ERROR = 11  # EAGAIN
        if exc.errno == RESOURCE_UNAVAILABLE_ERROR:
            LOG.info("Waiting for other process to finish operations "
                     "on {}.".format(self.name))
        else:
            raise
        # Now block until the other process releases the lock.
        # NOTE(review): placement reconstructed from collapsed source —
        # confirm against upstream whether this blocking call sits inside
        # the except clause or after the try statement.
        fcntl.lockf(self.lock_file, fcntl.LOCK_EX)
def write_data(self):
    """Serialize the in-memory quads state to the YAML config under a lock.

    Returns True on a successful write (after re-reading the file), False
    when the on-disk config is newer than our data or the write fails.
    """
    if self.config_newer_than_data():
        # On-disk config changed under us: reload instead of clobbering it.
        self.read_data()
        return False
    try:
        self.data = {"clouds": self.quads.clouds.data,
                     "hosts": self.quads.hosts.data,
                     "history": self.quads.history.data,
                     "cloud_history": self.quads.cloud_history.data}
        with open(self.config, 'w') as yaml_file:
            # Exclusive, non-blocking lock while the file is rewritten.
            fcntl.flock(yaml_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
            yaml_file.write(yaml.dump(self.data, default_flow_style=False))
            fcntl.flock(yaml_file, fcntl.LOCK_UN)
        self.read_data()
        return True
    except Exception as ex:
        # Fix: `except Exception, ex` is Python-2-only syntax; `as` works
        # on both Python 2.6+ and Python 3.
        self.logger.error("There was a problem with your file %s" % ex)
        return False
def _try_lock():
    """Check and create lock file - prevent running application twice.

    Return lock file handler (kept open to hold the lock), or None when
    another instance is detected or locking fails.
    """
    lock_file_path = _find_config_file("app.lock", False)
    _check_dir_for_file(lock_file_path)
    try:
        if fcntl is not None:
            # POSIX: a real advisory lock on the file.
            lock_file = open(lock_file_path, "w")
            fcntl.lockf(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        else:
            # No fcntl (e.g. Windows): fall back to existence of the file.
            if os.path.isfile(lock_file_path):
                _LOG.error("another instance detected (lock file exists) "
                           "- exiting")
                return None
            lock_file = open(lock_file_path, "w")
        return lock_file
    except IOError as err:
        import errno
        if err.errno == errno.EAGAIN:
            _LOG.error("another instance detected - exiting")
        else:
            _LOG.exception("locking failed: %s", err)
        return None
def _lock(fileno):
    """Try to lock a file. Return True on success."""
    # closing the file unlocks it, so we don't need to unlock here
    if platform.system() == 'Windows':
        try:
            msvcrt.locking(fileno, msvcrt.LK_NBLCK, 10)
        except PermissionError:
            return False
        return True
    try:
        fcntl.lockf(fileno, fcntl.LOCK_EX | fcntl.LOCK_NB)
    # the docs recommend catching both of these
    except (BlockingIOError, PermissionError):
        return False
    return True
def is_tor_data_dir_usable(tor_data_dir):
    """
    Checks if the Tor data dir specified is usable. This means that it is
    not being locked and we have permissions to write to it.

    Returns True when the directory does not exist yet or its 'lock' file
    can be locked, False when it is locked or unwritable.
    """
    if not os.path.exists(tor_data_dir):
        return True
    try:
        fcntl.flock(open(os.path.join(tor_data_dir, 'lock'), 'w'),
                    fcntl.LOCK_EX | fcntl.LOCK_NB)
        return True
    except (IOError, OSError) as err:
        if err.errno == errno.EACCES:
            # Permission error
            return False
        elif err.errno == errno.EAGAIN:
            # File locked
            return False
        # Fix: the original fell off the end here and silently returned
        # None for unexpected errors; propagate them instead.
        raise
def flock(path):
    """Attempt to acquire a POSIX file lock.

    Generator body (presumably wrapped with contextlib.contextmanager at the
    decoration site, which is outside this snippet — TODO confirm): yields
    True when the exclusive non-blocking lock was taken, False otherwise,
    and releases the lock on exit when it was held.
    """
    with open(path, "w+") as lf:
        try:
            fcntl.flock(lf, fcntl.LOCK_EX | fcntl.LOCK_NB)
            acquired = True
            yield acquired
        except OSError:
            # Lock is held elsewhere: report failure but do not raise.
            acquired = False
            yield acquired
        finally:
            if acquired:
                fcntl.flock(lf, fcntl.LOCK_UN)
def open_with_lock(filename, mode):
    """Open *filename*, poll for an exclusive flock, and yield the file.

    Generator (contextmanager-style): retries every 0.1 s while the file is
    locked elsewhere (EAGAIN); unrelated IOErrors propagate. The lock is
    released (best effort) after the consumer finishes.
    """
    with open(filename, mode) as f:
        while True:
            try:
                fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
                yield f
                break
            except IOError as e:
                # raise on unrelated IOErrors
                if e.errno != errno.EAGAIN:
                    raise
                else:
                    time.sleep(0.1)
        try:
            fcntl.flock(f.fileno(), fcntl.LOCK_UN)
        except Exception:
            # Unlock is best-effort; closing the file drops the lock anyway.
            pass
def acquire_lock(path):
    """
    little tool to do EAGAIN until lockfile released
    :param path:
    :return: path
    """
    lock_file = open(path, 'w')
    while True:
        send_to_syslog("attempting to acquire lock %s" % path)
        try:
            fcntl.lockf(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
            send_to_syslog("acquired lock %s" % path)
        except IOError as e:
            send_to_syslog("failed to acquire lock %s because '%s' - waiting 1 second" % (path, e))
            time.sleep(1)
            continue
        return lock_file
def uidNumber_getnext(self):
    """Get the next available uidNumber for adding a new user.

    Locks uidNumber file, reads number. Returns (file descriptor,
    uidNumber). uidNumber_savenext() must be called once the uidNumber is
    used successfully.
    """
    uid_num_file = os.open(rbconfig.file_uidNumber, os.O_RDWR)
    retries = 0
    while 1:
        try:
            fcntl.lockf(uid_num_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            # Locked elsewhere: retry every 0.5 s, up to 20 attempts.
            retries += 1
            if retries == 20:
                raise RBFatalError(
                    ('Could not lock uidNumber.txt file after 20 attempts.'
                     'Please try again!'))
            time.sleep(0.5)
        else:
            break
    # The descriptor stays open (and locked) for the caller.
    num_uid = int(os.read(uid_num_file, 32))
    return uid_num_file, num_uid
def main():
    """Export Ceph RGW metrics to stdout, guarded by a single-instance lock."""
    exit_status = 1
    try:
        args = parse_args()
        # Make sure the exporter is only running once.
        lock_file = '/var/lock/{}.lock'.format(os.path.basename(sys.argv[0]))
        lock_fd = os.open(lock_file, os.O_CREAT)
        lock_success = False
        try:
            fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            lock_success = True
        except IOError:
            msg = 'Failed to export metrics, another instance is running.'
            syslog.syslog(syslog.LOG_INFO, msg)
            sys.stderr.write(msg + '\n')
        if lock_success:
            # Create a new registry, otherwise unwanted default collectors are
            # added automatically.
            registry = prometheus_client.CollectorRegistry()
            # Register our own collector and write metrics to STDOUT.
            registry.register(CephRgwCollector(**vars(args)))
            sys.stdout.write(prometheus_client.generate_latest(registry))
            sys.stdout.flush()
            # Unlock the lock file.
            fcntl.flock(lock_fd, fcntl.LOCK_UN)
            exit_status = 0
    except Exception as e:
        syslog.syslog(syslog.LOG_ERR, str(e))
    # Cleanup
    # NOTE(review): if parse_args()/os.open() raised above, lock_fd and
    # lock_success are unbound here and this cleanup itself raises — confirm
    # whether that path matters upstream.
    os.close(lock_fd)
    if lock_success:
        try:
            os.unlink(lock_file)
        except:
            pass
    sys.exit(exit_status)
def _lockonly(file):
    """Take an exclusive non-blocking flock on *file*; True on success."""
    _msg('got file #', file.fileno())
    try:
        flock(file, LOCK_EX | LOCK_NB)
    except IOError:
        _msg('failed to lock')
        return False
    _msg('locked successfully')
    return True
def lock(self):
    '''
    Creates and holds on to the lock file with exclusive access.
    Returns True if lock successful, False if it is not, and raises
    an exception upon operating system errors encountered creating the
    lock file.
    '''
    try:
        #
        # Create or else open and truncate lock file, in read-write mode.
        #
        # A crashed app might not delete the lock file, so the
        # os.O_CREAT | os.O_EXCL combination that guarantees
        # atomic create isn't useful here. That is, we don't want to
        # fail locking just because the file exists.
        #
        # Could use os.O_EXLOCK, but that doesn't exist yet in my Python
        #
        self.lockfd = os.open(self.lockfile,
                              os.O_TRUNC | os.O_CREAT | os.O_RDWR)

        # Acquire exclusive lock on the file, but don't block waiting for it
        fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)

        # Writing to file is pointless, nobody can see it.
        # Fix: os.write() requires bytes on Python 3.
        os.write(self.lockfd, b"My Lockfile")

        return True
    except (OSError, IOError) as e:
        # Fix: `except (OSError, IOError), e` is Python-2-only syntax.
        # Lock cannot be acquired is okay, everything else reraise exception
        if e.errno in (errno.EACCES, errno.EAGAIN):
            return False
        else:
            raise
def __enter__(self):
    """Take an exclusive non-blocking lock on the pid file and write our PID.

    Returns the open pid-file handle; exits via SystemExit when another
    process already holds the lock.
    """
    # "a+" avoids truncating the file before the lock is ours.
    self.pidfile = open(self.path, "a+")
    try:
        fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        raise SystemExit("Already running according to " + self.path)
    # We own the lock: replace stale contents with the current PID.
    self.pidfile.seek(0)
    self.pidfile.truncate()
    self.pidfile.write(str(os.getpid()))
    self.pidfile.flush()
    self.pidfile.seek(0)
    return self.pidfile
def __init__(self, mutex_name):
    """Acquire a system-wide mutex backed by a locked temp file.

    self._acquired records success; the release closure (also triggered via
    a weakref finalizer on self) unlocks, closes and best-effort unlinks
    the file exactly once.
    """
    check_valid_mutex_name(mutex_name)
    filename = os.path.join(tempfile.gettempdir(), mutex_name)
    try:
        handle = open(filename, 'w')
        fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except:
        # Could not take the mutex: leave a no-op releaser in place.
        self._release_mutex = NULL
        self._acquired = False
        try:
            handle.close()
        except:
            pass
    else:
        def release_mutex(*args, **kwargs):
            # Note: can't use self here!
            if not getattr(release_mutex, 'called', False):
                release_mutex.called = True
                try:
                    fcntl.flock(handle, fcntl.LOCK_UN)
                except:
                    traceback.print_exc()
                try:
                    handle.close()
                except:
                    traceback.print_exc()
                try:
                    # Removing is pretty much optional (but let's do it to keep the
                    # filesystem cleaner).
                    os.unlink(filename)
                except:
                    pass

        # Don't use __del__: this approach doesn't have as many pitfalls.
        self._ref = weakref.ref(self, release_mutex)
        self._release_mutex = release_mutex
        self._acquired = True
def write_pid_file(pid_file, pid):
    """Write *pid* to *pid_file* under an exclusive lock.

    Returns 0 on success, -1 when the file cannot be opened or another
    instance already holds the lock (in which case its PID is logged).
    The descriptor is intentionally kept open to hold the lock.
    """
    import fcntl
    import stat

    try:
        fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
                     stat.S_IRUSR | stat.S_IWUSR)
    except OSError as e:
        shell.print_exception(e)
        return -1
    # Prevent the lock fd from leaking into child processes.
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert flags != -1
    flags |= fcntl.FD_CLOEXEC
    r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    assert r != -1
    # There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
    # via fcntl.fcntl. So use lockf instead
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
    except IOError:
        r = os.read(fd, 32)
        if r:
            logging.error('already started at pid %s' % common.to_str(r))
        else:
            logging.error('already started')
        os.close(fd)
        return -1
    os.ftruncate(fd, 0)
    os.write(fd, common.to_bytes(str(pid)))
    return 0
def trylock_or_exit(self, timeout=10):
    """Try for ~*timeout* seconds to lock self.lockfile; sys.exit(1) on failure.

    On success the locked file object is kept on self.lockfp so the lock is
    held for the lifetime of the process.
    """
    interval = 0.1
    n = int(timeout / interval) + 1
    flag = fcntl.LOCK_EX | fcntl.LOCK_NB
    for ii in range(n):
        fd = os.open(self.lockfile, os.O_RDWR | os.O_CREAT)
        # Keep the lock fd from leaking into child processes.
        fcntl.fcntl(fd, fcntl.F_SETFD,
                    fcntl.fcntl(fd, fcntl.F_GETFD, 0) | fcntl.FD_CLOEXEC)
        try:
            fcntl.lockf(fd, flag)
            # Fix: 'w+r' is not a valid mode string (rejected on Python 3);
            # 'w+' is what was intended.
            self.lockfp = os.fdopen(fd, 'w+')
            break
        except IOError as e:
            os.close(fd)
            # Fix: indexing the exception (`e[0]`) is Python-2-only;
            # use e.errno.
            if e.errno == errno.EAGAIN:
                time.sleep(interval)
            else:
                raise
    else:
        logger.info("Failure acquiring lock %s" % (self.lockfile, ))
        sys.exit(1)

    logger.info("OK acquired lock %s" % (self.lockfile))
def lock_file_nonblocking(fileobj):
    """Take an exclusive non-blocking flock on *fileobj*.

    Returns True on success, False when another process holds the lock;
    unexpected IOErrors propagate.
    """
    # Use fcntl.flock instead of fcntl.lockf. lockf on pypy 1.7 seems
    # to ignore existing locks.
    try:
        fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as ioe:
        # Fix: `except IOError, ioe` is Python-2-only syntax.
        if ioe.errno not in (errno.EACCES, errno.EAGAIN):
            raise
        return False
    return True
def lockfile(filename):
    """Contextmanager-style generator: yield True if *filename* was locked.

    Yields False when the file is already locked elsewhere (EACCES/EAGAIN);
    other IOErrors propagate. The lock is released when the consumer is done.
    """
    with open(filename, "wb") as opened:
        fd = opened.fileno()
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as ioe:
            if ioe.errno not in (errno.EACCES, errno.EAGAIN):
                raise
            yield False
        else:
            try:
                yield True
            finally:
                fcntl.flock(fd, fcntl.LOCK_UN)
def acquire(self):
    """Open and exclusively lock self.lock_file; return the open handle.

    Raises OperationError (project-defined) when the file cannot be opened
    or the non-blocking lock cannot be taken; the handle is closed first.
    """
    try:
        self._lock = open(self.lock_file, "w")
        flock(self._lock, LOCK_EX | LOCK_NB)
        logging.debug("Acquired exclusive lock on file: %s" % self.lock_file)
        return self._lock
    except Exception:
        logging.debug("Error acquiring lock on file: %s" % self.lock_file)
        # Don't leak the handle when locking failed after a successful open.
        if self._lock:
            self._lock.close()
        raise OperationError("Could not acquire lock on file: %s!" % self.lock_file)
def trylock(self):
    """Take an exclusive non-blocking POSIX record lock on self.lockfile.

    self.lockfile is presumably an already-open file object or descriptor —
    TODO confirm at the call site. Raises IOError/OSError when the lock is
    already held elsewhere.
    """
    fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def __init__(self):
    """Single-instance guard: lock LOCK_PATH, set self.is_running if taken."""
    self.fh = None
    self.is_running = False
    try:
        self.fh = open(LOCK_PATH, 'w')
        fcntl.lockf(self.fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except EnvironmentError as err:
        # If open() succeeded but lockf failed, another instance holds the
        # lock; a failure to open the file itself is unexpected — re-raise.
        if self.fh is not None:
            self.is_running = True
        else:
            raise
def openLocked(path, mode="w"):
    """Open *path* and, on POSIX, take an exclusive non-blocking flock on it.

    Returns the open file object (which holds the lock until closed).
    On non-POSIX platforms the file is opened without locking.
    Raises IOError/OSError when the lock is already held.
    """
    if os.name == "posix":
        import fcntl
        f = open(path, mode)
        try:
            fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except (IOError, OSError):
            # Fix: close the handle instead of leaking it when the lock
            # cannot be acquired.
            f.close()
            raise
    else:
        f = open(path, mode)
    return f
def write_pid_file(pid_file, pid):
    """Write *pid* to *pid_file* under an exclusive lock.

    Returns 0 on success, -1 when the file cannot be opened or another
    instance already holds the lock (its PID, if readable, is logged).
    The descriptor is intentionally kept open to hold the lock.
    """
    try:
        fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
                     stat.S_IRUSR | stat.S_IWUSR)
    except OSError as e:
        LOG.exception(e)
        return -1
    # Prevent the lock fd from leaking into child processes.
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert flags != -1
    flags |= fcntl.FD_CLOEXEC
    r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    assert r != -1
    # There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
    # via fcntl.fcntl. So use lockf instead
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
    except IOError:
        r = os.read(fd, 32)
        if r:
            logging.error('already started at pid %s' % utils.to_str(r))
        else:
            logging.error('already started')
        os.close(fd)
        return -1
    os.ftruncate(fd, 0)
    os.write(fd, utils.to_bytes(str(pid)))
    return 0
def _trylock(lockfile):
    """Take an exclusive non-blocking POSIX record lock on *lockfile*.

    *lockfile* is presumably an open file object or descriptor — TODO
    confirm at call sites. Raises IOError/OSError if already locked.
    """
    fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def __init__(self, filename): self.filename = filename # This will create it if it does not exist already self.handle = open(filename, 'w') # Bitwise OR fcntl.LOCK_NB if you need a non-blocking lock
def _try_lock(self, fd):
    """Try to acquire the lock file without blocking.

    :param int fd: file descriptor of the opened file to lock
    :raises errors.LockError: when another process holds the lock
    """
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as err:
        if err.errno in (errno.EACCES, errno.EAGAIN):
            logger.debug(
                "A lock on %s is held by another process.", self._path)
            raise errors.LockError(
                "Another instance of Certbot is already running.")
        # Anything other than "already locked" is unexpected: propagate.
        raise
def run_chiboard(self):
    """Ensure a chiboard server is running and log the URL to reach it.

    Uses a non-blocking flock on chiboard's config file to detect whether a
    server already runs: lock taken => not running, start one; lock refused
    => running, read its port from the config.
    """
    pass  # NOTE(review): dead statement left from the original snippet.
    import subprocess
    from chi import board
    from chi.board import CHIBOARD_HOME, MAGIC_PORT
    port = None
    start = False
    cbc = join(CHIBOARD_HOME, CONFIG_NAME)
    if os.path.isfile(cbc):
        with open(cbc) as f:
            import fcntl
            try:
                fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
                start = True
                fcntl.flock(f, fcntl.LOCK_UN)
            except (BlockingIOError, OSError):
                # chiboard is running
                try:
                    data = json.load(f)
                    port = data.get('port')
                except json.JSONDecodeError:
                    port = None
    else:
        start = True

    if start:
        from chi.board import main
        chiboard = main.__file__
        subprocess.check_call([sys.executable, chiboard, '--port', str(MAGIC_PORT), '--daemon'])
        port = MAGIC_PORT

    if port is None:
        logger.warning('chiboard seems to be running but port could not be read from its config')
    else:
        logger.info(f"{self.f.__name__} started. Check progress at http://localhost:{port}/exp/#/local{self.logdir}")
def __init__(self, path):
    """Take over the config file at *path*, evicting a previous holder.

    Reads the old config first (to learn the previous instance's pid), then
    tries up to 10 times to open+lock the file, SIGTERM-ing the recorded
    pid between attempts.
    """
    import json
    self._path = path
    try:
        with open(path) as f:
            old_data = json.load(f)
    except json.JSONDecodeError:
        logger.warning('Could not decode config')
        old_data = {}
    except OSError:
        logger.debug('No config file')
        old_data = {}

    for i in range(10):
        try:
            # NOTE(review): 'w+' truncates the file even before the lock is
            # confirmed — presumably acceptable here; verify upstream.
            self._f = open(path, 'w+')
            fcntl.flock(self._f, fcntl.LOCK_EX | fcntl.LOCK_NB)
            self._locked = True
            break
        except BlockingIOError:
            import signal
            pid = old_data.get('pid')
            if pid:
                logger.info(f'Config file is locked (try {i}). Killing previous instance {pid}')
                os.kill(pid, signal.SIGTERM)
                time.sleep(.05)
            else:
                logger.error(f'Config file is locked and no pid to kill')
    # NOTE(review): if all 10 attempts fail, _locked was never assigned and
    # this raises AttributeError rather than AssertionError — confirm intent.
    assert self._locked
def lock_file(fname):
    """Lock a file.

    Opens *fname* for writing and takes an exclusive non-blocking lock.
    Returns the open (locked) file object, or None when the lock cannot
    be acquired.
    """
    import fcntl
    f = open(fname, mode='w')
    try:
        fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except Exception:
        # Fix: close the handle instead of leaking it when locking fails.
        f.close()
        return None
    return f
def __enter__(self):
    """Enter RunSingleInstance class
    :return: self
    """
    self.__checked = True
    try:
        self.__filelock = open(self.__lockfile, 'w+')
        # None blocking lock
        fcntl.lockf(self.__filelock, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        # Lock refused: another instance holds it (open failure re-raises
        # implicitly only when __filelock stayed None from a prior state).
        if self.__filelock is not None:
            self.__is_running = True
    return self
def do_acquire_read_lock(self, wait):
    """Take a shared (read) flock on the lock file.

    With wait=False, returns True/False for a non-blocking attempt,
    cleaning up the descriptor on failure; with wait=True, blocks until
    acquired and returns True.
    """
    filedescriptor = self._open(os.O_CREAT | os.O_RDONLY)
    if not wait:
        try:
            fcntl.flock(filedescriptor, fcntl.LOCK_SH | fcntl.LOCK_NB)
            return True
        except IOError:
            os.close(filedescriptor)
            # _filedescriptor is presumably a per-thread holder set by
            # self._open — TODO confirm in the enclosing class.
            self._filedescriptor.remove()
            return False
    else:
        fcntl.flock(filedescriptor, fcntl.LOCK_SH)
        return True
def do_acquire_write_lock(self, wait):
    """Take an exclusive (write) flock on the lock file.

    With wait=False, returns True/False for a non-blocking attempt,
    cleaning up the descriptor on failure; with wait=True, blocks until
    acquired and returns True.
    """
    filedescriptor = self._open(os.O_CREAT | os.O_WRONLY)
    if not wait:
        try:
            fcntl.flock(filedescriptor, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return True
        except IOError:
            os.close(filedescriptor)
            # _filedescriptor is presumably a per-thread holder set by
            # self._open — TODO confirm in the enclosing class.
            self._filedescriptor.remove()
            return False
    else:
        fcntl.flock(filedescriptor, fcntl.LOCK_EX)
        return True
def management_lock(view_func):
    """Decorator: allow only one concurrent run of the wrapped command.

    Derives a lock-file name in /tmp from the module of the wrapped
    callable's first argument; if the lock is already held, logs and
    terminates the process with os._exit(1).
    """
    def wrapper_lock(*args, **kwargs):
        try:
            lock_file_path = os.path.join(
                '/tmp/',
                "{0}.lock".format(args[0].__class__.__module__.split('.')[-1]))
            f = open(lock_file_path, 'w')
            # Fix: lock flags are a bitmask — combine with `|`, not `+`
            # (arithmetic happens to work only because the bits are distinct).
            fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            logging.debug("Process already is running.")
            os._exit(1)
        return view_func(*args, **kwargs)
    # Preserve the innermost callable for introspection by other wrappers.
    wrapper_lock.view_func = view_func.view_func if hasattr(view_func, 'view_func') else view_func
    return wrapper_lock
def trylock(fd):
    """Try an exclusive non-blocking lockf on *fd*.

    Returns True on success, False when another process holds the lock
    (EACCES/EAGAIN); other IOErrors propagate.
    """
    import fcntl
    import errno
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        # Fix: `except IOError, e` is Python-2-only syntax.
        if e.errno in (errno.EACCES, errno.EAGAIN):
            return False
        else:
            raise
    return True
def _lock(self):
    """Exclusively flock the serial device, if fcntl and the port exist.

    No-op when fcntl is unavailable (non-POSIX) or no serial handle is open;
    raises IOError/OSError if the device is locked by another process.
    """
    if not fcntl or not self.ser:
        return
    fcntl.flock(self.ser.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    logging.debug('_lock')
def acquire_lock_or_exit(self, wait_for_seconds=10):
    """Poll once per second (up to *wait_for_seconds*) for the iptables lock.

    Returns on success (the open handle keeps the lock for the process
    lifetime); otherwise fails the Ansible module run.
    """
    lock_file = self.STATE_DIR + '/.iptables.lock'
    i = 0
    f = open(lock_file, 'w+')
    while i < wait_for_seconds:
        try:
            fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return
        except IOError:
            i += 1
            time.sleep(1)
    Iptables.module.fail_json(msg="Could not acquire lock to continue execution! "
                                  "Probably another instance of this module is running.")

# Check if a table has anything to flush (to check all tables pass table='*').
def unlockfile(lf):
    """
    Unlock a file locked using lockfile()

    Best-effort removes the lock file (only when we can briefly hold it
    exclusively), then drops the lock and closes the handle.
    """
    try:
        # If we had a shared lock, we need to promote to exclusive before
        # removing the lockfile. Attempt this, ignore failures.
        fcntl.flock(lf.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        os.unlink(lf.name)
    except (IOError, OSError):
        pass

    fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
    lf.close()
def unlockfile(lock_file):
    """
    Unlock a file locked using lockfile()

    Best-effort removes the lock file (only when we can briefly hold it
    exclusively), then drops the lock and closes the handle.
    """
    try:
        # If we had a shared lock, we need to promote to exclusive before
        # removing the lockfile. Attempt this, ignore failures.
        fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
        os.unlink(lock_file.name)
    except (IOError, OSError):
        pass

    fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
    lock_file.close()
#ENDOFCOPY