The following 50 code examples, extracted from open-source Python projects, illustrate how to use fcntl.lockf().
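Before the project examples, here is a minimal self-contained sketch of the typical advisory-locking pattern with fcntl.lockf(): take an exclusive, non-blocking lock on a dedicated lock file, and release it when the work is done. This is our own illustration, not taken from any of the projects below; the path /tmp/example.lock and the helper names are placeholders.

import fcntl
import os

LOCK_PATH = '/tmp/example.lock'  # placeholder path used only for this sketch


def acquire_lock():
    """Return a locked file object, or None if another process holds the lock."""
    fh = open(LOCK_PATH, 'w')
    try:
        # LOCK_EX = exclusive lock, LOCK_NB = raise instead of blocking
        fcntl.lockf(fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return fh
    except (IOError, OSError):
        fh.close()
        return None


def release_lock(fh):
    """Release the advisory lock and close the lock file."""
    fcntl.lockf(fh, fcntl.LOCK_UN)
    fh.close()


if __name__ == '__main__':
    lock = acquire_lock()
    if lock is None:
        raise SystemExit("another instance is already running")
    try:
        pass  # do work while holding the lock
    finally:
        release_lock(lock)

Most of the examples below are variations on this pattern: single-instance guards, PID files, and cross-process mutexes around shared resources.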
def do_magic(self):
    if OS_WIN:
        try:
            if os.path.exists(LOCK_PATH):
                os.unlink(LOCK_PATH)
            self.fh = os.open(LOCK_PATH, os.O_CREAT | os.O_EXCL | os.O_RDWR)
        except EnvironmentError as err:
            if err.errno == 13:
                self.is_running = True
            else:
                raise
    else:
        try:
            self.fh = open(LOCK_PATH, 'w')
            fcntl.lockf(self.fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except EnvironmentError as err:
            if self.fh is not None:
                self.is_running = True
            else:
                raise

def lock_method(lock_filename):
    '''
    Use an OS lock such that a method can only be called once at a time.
    '''
    def decorator(func):
        @functools.wraps(func)
        def lock_and_run_method(*args, **kwargs):
            # Only run this program if it's not already running
            # Snippet based on
            # http://linux.byexamples.com/archives/494/how-can-i-avoid-running-a-python-script-multiple-times-implement-file-locking/
            fp = open(lock_filename, 'w')
            try:
                fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                raise SystemExit(
                    "This program is already running. Please stop the current process or " +
                    "remove " + lock_filename + " to run this script."
                )
            return func(*args, **kwargs)
        return lock_and_run_method
    return decorator

def __init__(self, addr, conf, log, fd=None):
    if fd is None:
        try:
            st = os.stat(addr)
        except OSError as e:
            if e.args[0] != errno.ENOENT:
                raise
        else:
            if stat.S_ISSOCK(st.st_mode):
                os.remove(addr)
            else:
                raise ValueError("%r is not a socket" % addr)
    self.parent = os.getpid()
    super(UnixSocket, self).__init__(addr, conf, log, fd=fd)
    # each arbiter grabs a shared lock on the unix socket.
    fcntl.lockf(self.sock, fcntl.LOCK_SH | fcntl.LOCK_NB)

def cache(self, name, suffix=''):
    """Allocate a name in the cache. If the cache was set up,
    also lock the file, so that another process has to wait
    before using the same file name.
    Important: call unlock() on the result.
    """
    # TODO: something more clever here?
    fname = ''
    if self.work_dir is not None:
        fname = self.cache_dir + os.sep + name + suffix
        lock_name = fname + '.lock'
        f = self._locks[lock_name] = open(lock_name, 'a')
        fcntl.lockf(f.fileno(), fcntl.LOCK_EX)
    else:
        fname = self.tmp(name + suffix)
    return fname

def unlock(self, fname):
    # TODO: something more clever here?
    lock_name = fname + '.lock'
    try:
        f = self._locks[lock_name]
        if f is not None:
            fcntl.lockf(f.fileno(), fcntl.LOCK_UN)
            f.close()
        del self._locks[lock_name]
        # try:
        #     os.unlink(lock_name)
        # except OSError:  # probably somebody else is blocking
        #     pass
    except KeyError:
        pass

# def __del__(self):
#     self.do_cleanup()
#     pass

def isSingleInstance(flavor_id=""): global fp basename = os.path.splitext(os.path.abspath(sys.argv[0]))[0].replace( "/", "-").replace(":", "").replace("\\", "-") + '-%s' % flavor_id + '.lock' lockfile = os.path.normpath(tempfile.gettempdir() + '/' + basename) if sys.platform == 'win32': try: if os.path.exists(lockfile): os.unlink(lockfile) fp = os.open( lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR) except OSError: return False else: # non Windows fp = open(lockfile, 'w') try: fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB) except IOError: return False return True
def acquire(self):
    if self.mutex_debug:
        print("I2C mutex acquire")
    acquired = False
    while not acquired:
        try:
            self.DexterLockI2C_handle = open('/run/lock/DexterLockI2C', 'w')
            # lock
            fcntl.lockf(self.DexterLockI2C_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
            acquired = True
        except IOError:  # already locked by a different process
            time.sleep(0.001)
        except Exception as e:
            print(e)
    if self.mutex_debug:
        print("I2C mutex acquired {}".format(time.time()))

def lock(self):
    """
    Locks the package to avoid concurrent operations on its shared
    resources. Currently, the only resource shared among scripts executed
    from different directories is the repository.
    """
    if not self.locking_enabled:
        LOG.debug("This package has no shared resources to lock")
        return

    LOG.debug("Checking for lock on file {}.".format(self.lock_file_path))
    self.lock_file = open(self.lock_file_path, "w")
    try:
        fcntl.lockf(self.lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as exc:
        RESOURCE_UNAVAILABLE_ERROR = 11
        if exc.errno == RESOURCE_UNAVAILABLE_ERROR:
            LOG.info("Waiting for other process to finish operations "
                     "on {}.".format(self.name))
        else:
            raise
        fcntl.lockf(self.lock_file, fcntl.LOCK_EX)

def _try_lock():
    """Check and create lock file - prevent running application twice.
    Return lock file handler.
    """
    lock_file_path = _find_config_file("app.lock", False)
    _check_dir_for_file(lock_file_path)
    try:
        if fcntl is not None:
            lock_file = open(lock_file_path, "w")
            fcntl.lockf(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        else:
            if os.path.isfile(lock_file_path):
                _LOG.error("another instance detected (lock file exists) "
                           "- exiting")
                return None
            lock_file = open(lock_file_path, "w")
        return lock_file
    except IOError as err:
        import errno
        if err.errno == errno.EAGAIN:
            _LOG.error("another instance detected - exiting")
        else:
            _LOG.exception("locking failed: %s", err)
    return None

def _lock(fileno):
    """Try to lock a file. Return True on success."""
    # closing the file unlocks it, so we don't need to unlock here
    if platform.system() == 'Windows':
        try:
            msvcrt.locking(fileno, msvcrt.LK_NBLCK, 10)
            return True
        except PermissionError:
            return False
    else:
        try:
            fcntl.lockf(fileno, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return True
        # the docs recommend catching both of these
        except (BlockingIOError, PermissionError):
            return False

def __del__(self):
    import sys
    import os

    if not self.initialized:
        return
    try:
        if sys.platform == 'win32':
            if hasattr(self, 'fd'):
                os.close(self.fd)
                os.unlink(self.lockfile)
        else:
            import fcntl
            fcntl.lockf(self.fp, fcntl.LOCK_UN)
            # os.close(self.fp)
            if os.path.isfile(self.lockfile):
                os.unlink(self.lockfile)
    except Exception as e:
        raise

def unlock(self):
    """Unlocks the LockFile."""
    if self._fileobj:
        # To avoid race conditions with the next caller
        # waiting for the lock file, it is simply
        # truncated instead of removed.
        try:
            fcntl.lockf(self._fileobj, fcntl.LOCK_UN)
            self._fileobj.truncate(0)
            self._fileobj.close()
            self._lock.release()
        except EnvironmentError:
            # If fcntl, or the file operations returned
            # an exception, drop the lock. Do not catch
            # the exception that could escape from
            # releasing the lock.
            self._lock.release()
            raise
        finally:
            self._fileobj = None
    else:
        if self._provide_mutex:
            assert not self._lock.locked

def acquire_lock(path):
    """
    little tool to do EAGAIN until lockfile released
    :param path:
    :return: path
    """
    lock_file = open(path, 'w')
    while True:
        send_to_syslog("attempting to acquire lock %s" % path)
        try:
            fcntl.lockf(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
            send_to_syslog("acquired lock %s" % path)
            return lock_file
        except IOError as e:
            send_to_syslog("failed to acquire lock %s because '%s' - waiting 1 second" % (path, e))
            time.sleep(1)

def uidNumber_getnext(self):
    """Get the next available uidNumber for adding a new user.

    Locks uidNumber file, reads number. Returns (file descriptor, uidNumber).
    uidNumber_savenext() must be called once the uidNumber is used
    successfully.
    """
    uid_num_file = os.open(rbconfig.file_uidNumber, os.O_RDWR)
    retries = 0
    while 1:
        try:
            fcntl.lockf(uid_num_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            retries += 1
            if retries == 20:
                raise RBFatalError(
                    ('Could not lock uidNumber.txt file after 20 attempts. '
                     'Please try again!'))
            time.sleep(0.5)
        else:
            break
    num_uid = int(os.read(uid_num_file, 32))
    return uid_num_file, num_uid

def update(path):
    """ allow concurrent update of metadata """
    p = os.path.join(path, "metadata.json")
    # we have to open writeable to get a lock
    with open(p, "a") as f:
        fcntl.lockf(f, fcntl.LOCK_EX)
        data = load(path)
        yield(data)
        save(path, data)
        fcntl.lockf(f, fcntl.LOCK_UN)

def _unlock_file(f):
    """Unlock file f using lockf and dot locking."""
    if fcntl:
        fcntl.lockf(f, fcntl.LOCK_UN)
    if os.path.exists(f.name + '.lock'):
        os.remove(f.name + '.lock')

def unlock_and_close(self):
    """Close and unlock the file using the fcntl.lockf primitive."""
    if self._locked:
        fcntl.lockf(self._fh.fileno(), fcntl.LOCK_UN)
        self._locked = False
    if self._fh:
        self._fh.close()

def clean_up(self):
    # this is not really needed
    try:
        if self.fh is not None:
            if OS_WIN:
                os.close(self.fh)
                os.unlink(LOCK_PATH)
            else:
                fcntl.lockf(self.fh, fcntl.LOCK_UN)
                self.fh.close()  # ???
                os.unlink(LOCK_PATH)
    except Exception as err:
        # logger.exception(err)
        raise  # for debugging purposes, do not raise it in production

def write_pid_file(pid_file, pid):
    import fcntl
    import stat

    try:
        fd = os.open(pid_file, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
    except OSError as e:
        shell.print_exception(e)
        return -1
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert flags != -1
    flags |= fcntl.FD_CLOEXEC
    r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    assert r != -1
    # There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
    # via fcntl.fcntl. So use lockf instead
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
    except IOError:
        r = os.read(fd, 32)
        if r:
            logging.error('already started at pid %s' % common.to_str(r))
        else:
            logging.error('already started')
        os.close(fd)
        return -1
    os.ftruncate(fd, 0)
    os.write(fd, common.to_bytes(str(pid)))
    return 0

def close(self):
    self.save()
    self.storage.close()
    if self.lock:
        fcntl.lockf(self.lock, fcntl.LOCK_UN)
        self.lock.close()
        os.unlink(self.lock.name)
    self.opened = False

def trylock_or_exit(self, timeout=10):
    interval = 0.1
    n = int(timeout / interval) + 1
    flag = fcntl.LOCK_EX | fcntl.LOCK_NB
    for ii in range(n):
        fd = os.open(self.lockfile, os.O_RDWR | os.O_CREAT)
        fcntl.fcntl(fd, fcntl.F_SETFD,
                    fcntl.fcntl(fd, fcntl.F_GETFD, 0) | fcntl.FD_CLOEXEC)
        try:
            fcntl.lockf(fd, flag)
            self.lockfp = os.fdopen(fd, 'w+r')
            break
        except IOError as e:
            os.close(fd)
            if e.errno == errno.EAGAIN:
                time.sleep(interval)
            else:
                raise
    else:
        logger.info("Failure acquiring lock %s" % (self.lockfile, ))
        sys.exit(1)

    logger.info("OK acquired lock %s" % (self.lockfile))

def unlock(self):
    if self.lockfp is None:
        return
    fd = self.lockfp.fileno()
    fcntl.lockf(fd, fcntl.LOCK_UN)
    self.lockfp.close()
    self.lockfp = None

def trylock(self):
    fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

def unlock(self):
    fcntl.lockf(self.lockfile, fcntl.LOCK_UN)

def __init__(self):
    self.fh = None
    self.is_running = False
    try:
        self.fh = open(LOCK_PATH, 'w')
        fcntl.lockf(self.fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except EnvironmentError as err:
        if self.fh is not None:
            self.is_running = True
        else:
            raise

def __exit__(self, exc_type, exc_value, traceback):
    if self.fh is not None:
        fcntl.lockf(self.fh, fcntl.LOCK_UN)
        self.fh.close()
        os.unlink(LOCK_PATH)

def _update(self, mode):
    if mode is None:
        if self.locked:
            fcntl.lockf(self.fd, fcntl.LOCK_UN)
            self.locked = False
    elif self.mode is not mode or not self.locked:
        self.mode = mode
        self.locked = True
        for offset in self.locks:
            fcntl.lockf(self.fd, self.mode, 1, offset)

def write_pid_file(pid_file, pid):
    try:
        fd = os.open(pid_file, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
    except OSError as e:
        LOG.exception(e)
        return -1
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert flags != -1
    flags |= fcntl.FD_CLOEXEC
    r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    assert r != -1
    # There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
    # via fcntl.fcntl. So use lockf instead
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
    except IOError:
        r = os.read(fd, 32)
        if r:
            logging.error('already started at pid %s' % utils.to_str(r))
        else:
            logging.error('already started')
        os.close(fd)
        return -1
    os.ftruncate(fd, 0)
    os.write(fd, utils.to_bytes(str(pid)))
    return 0

def create_pid_file(self):
    fd = open(self._pid_file, 'wb')
    fcntl.lockf(fd, fcntl.LOCK_EX)
    timestamp = time.strftime('%Y-%m-%d %H:%M:%S')
    fd.write(self._pid + ";" + timestamp)
    fd.close()
    return True

def _trylock(lockfile):
    fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

def _unlock(lockfile):
    fcntl.lockf(lockfile, fcntl.LOCK_UN)

def _try_lock(self, fd):
    """Try to acquire the lock file without blocking.

    :param int fd: file descriptor of the opened file to lock
    """
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as err:
        if err.errno in (errno.EACCES, errno.EAGAIN):
            logger.debug(
                "A lock on %s is held by another process.", self._path)
            raise errors.LockError(
                "Another instance of Certbot is already running.")
        raise

def run(self):
    """ THIS IS BLOCKING! CALL IT AT LAST! """
    with open(Job.lockFileName, 'w') as f:
        rv = fcntl.lockf(f.fileno(), fcntl.LOCK_EX)
        print("job {} is running.".format(os.getpid()))
        f.write(str(os.getpid()) + '\n')
        f.flush()
        self.action()
        fcntl.lockf(f.fileno(), fcntl.LOCK_UN)

def write_server_info(filename, port):
    pid = os.getpid()
    rank = MPI.COMM_WORLD.Get_rank()
    server_info = '{}:{}:{}:{}:{}'.format(LINE_TOKEN, rank, pid, port, LINE_TOKEN).strip()
    logger.debug("write_server_info: line %s, filename %s", server_info, filename)

    time.sleep(0.1 * rank)
    with open(filename, "a") as f:
        fcntl.lockf(f, fcntl.LOCK_EX)
        f.write(server_info + '\n')
        f.flush()
        os.fsync(f.fileno())
        fcntl.lockf(f, fcntl.LOCK_UN)

    return server_info

def get_rpc_port_by_rank(self, rank, num_servers):
    if self.mpirun_proc is None:
        raise RuntimeError("Launch mpirun_proc before reading of rpc ports")

    if self._rpc_ports is not None:
        return self._rpc_ports[rank]

    server_info_pattern = re.compile("^" + LINE_TOKEN +
                                     ":([\d]+):([\d]+):([\d]+):" +
                                     LINE_TOKEN + "$")
    self._tmpfile.seek(0)

    while True:
        fcntl.lockf(self._tmpfile, fcntl.LOCK_SH)
        line_count = sum(1 for line in self._tmpfile if server_info_pattern.match(line))
        self._tmpfile.seek(0)
        fcntl.lockf(self._tmpfile, fcntl.LOCK_UN)
        if line_count == num_servers:
            break
        else:
            time.sleep(0.1)

    server_infos = [tuple([int(server_info_pattern.match(line).group(1)),
                           int(server_info_pattern.match(line).group(3))])
                    for line in self._tmpfile]
    server_infos = sorted(server_infos, key=lambda x: x[0])
    self._rpc_ports = [row[1] for row in server_infos]
    logger.debug("get_rpc_ports: ports (in MPI rank order): %s", self._rpc_ports)
    self._tmpfile.close()
    return self._rpc_ports[rank]

def _lock_file_posix(self, path, exclusive=True):
    lock_path = path + '.lock'
    if exclusive is True:
        f_lock = open(lock_path, 'w')
        fcntl.lockf(f_lock, fcntl.LOCK_EX)
    else:
        f_lock = open(lock_path, 'r')
        fcntl.lockf(f_lock, fcntl.LOCK_SH)
    if os.path.exists(lock_path) is False:
        f_lock.close()
        return None
    return f_lock

def lock_file(fname):
    """Lock a file."""
    import fcntl
    f = open(fname, mode='w')
    try:
        fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except Exception:
        return None
    return f

def __enter__(self):
    """Enter RunSingleInstance class
    :return: self
    """
    self.__checked = True
    try:
        self.__filelock = open(self.__lockfile, 'w+')
        # Non-blocking lock
        fcntl.lockf(self.__filelock, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        if self.__filelock is not None:
            self.__is_running = True
    return self

def __exit__(self, type, value, tb):  # pylint: disable=redefined-builtin
    """Exit RunSingleInstance class
    :return: None
    """
    try:
        if not self.__is_running:
            fcntl.lockf(self.__filelock, fcntl.LOCK_UN)
            self.__filelock.close()
            os.unlink(self.__lockfile)
    except Exception as err:
        logger.error("Error unlocking single instance file", error=err.message)

def management_lock(view_func):
    def wrapper_lock(*args, **kwargs):
        try:
            lock_file_path = os.path.join(
                '/tmp/', "{0}.lock".format(args[0].__class__.__module__.split('.')[-1]))
            f = open(lock_file_path, 'w')
            fcntl.lockf(f, fcntl.LOCK_EX + fcntl.LOCK_NB)
        except IOError:
            logging.debug("Process already is running.")
            os._exit(1)
        return view_func(*args, **kwargs)
    wrapper_lock.view_func = view_func.view_func if hasattr(view_func, 'view_func') else view_func
    return wrapper_lock

def trylock(fd):
    import fcntl
    import errno
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        if e.errno in (errno.EACCES, errno.EAGAIN):
            return False
        else:
            raise
    return True

def __init__(self, hid):
    self.file = open(hid, 'rb+', buffering=0)
    fcntl.lockf(self.file, fcntl.LOCK_EX)
    flag = fcntl.fcntl(self.file, fcntl.F_GETFL)
    fcntl.fcntl(self.file, fcntl.F_SETFL, flag | os.O_NONBLOCK)
    # Clock 48 MHz
    fcntl.ioctl(self.file, 0xC0054806, bytes([0xA1, 0x01, 0x02]))
    # No flow control mode
    fcntl.ioctl(self.file, 0xC0054806, bytes([0xA1, 0x03, 0x04]))
    self.gpio = [0x00, 0x00]
    self.read_buffer = []
    self._write_gpio()

def connection_lock(self):
    f = self._play_context.connection_lockfd
    display.vvvv('CONNECTION: pid %d waiting for lock on %d' % (os.getpid(), f),
                 host=self._play_context.remote_addr)

    fcntl.lockf(f, fcntl.LOCK_EX)

    display.vvvv('CONNECTION: pid %d acquired lock on %d' % (os.getpid(), f),
                 host=self._play_context.remote_addr)

def connection_unlock(self):
    f = self._play_context.connection_lockfd
    fcntl.lockf(f, fcntl.LOCK_UN)
    display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f),
                 host=self._play_context.remote_addr)

def __enter__(self):
    # Lock the database interface (multi-threading)
    self._thread_lock.acquire()
    # Lock the database (multi-processing)
    fcntl.lockf(self._db_fd, fcntl.LOCK_EX)