我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用fcntl.LOCK_EX。
def __enter__(self):
    """Acquire an exclusive lock on the pidfile and record our PID.

    Returns the open pidfile handle (or ``self.pidfile`` unchanged when
    no path is configured).  Raises SystemExit when another process
    already holds the lock.
    """
    if self.path is None:
        # No pidfile configured: nothing to lock.
        return self.pidfile
    # "a+" creates the file if missing without truncating a PID another
    # process may have written, since we do not hold the lock yet.
    self.pidfile = open(self.path, "a+")
    try:
        # Non-blocking exclusive lock: fail fast instead of waiting.
        fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        self.pidfile = None
        raise SystemExit("Already running according to " + self.path)
    # Lock held: now it is safe to replace stale content with our PID.
    self.pidfile.seek(0)
    self.pidfile.truncate()
    self.pidfile.write(str(os.getpid()))
    self.pidfile.flush()
    self.pidfile.seek(0)
    return self.pidfile
def do_magic(self):
    """Try to become the single running instance.

    Sets ``self.is_running`` when another instance already holds the
    lock; otherwise keeps the lock handle alive in ``self.fh``.
    """
    if not OS_WIN:
        # POSIX: a non-blocking exclusive fcntl lock on the lock file.
        try:
            self.fh = open(LOCK_PATH, 'w')
            fcntl.lockf(self.fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except EnvironmentError as err:
            # Open succeeded but the lock is taken -> already running;
            # failure to open at all is a real error.
            if self.fh is None:
                raise
            self.is_running = True
        return
    # Windows: exclusive file creation stands in for locking.
    try:
        if os.path.exists(LOCK_PATH):
            os.unlink(LOCK_PATH)
        self.fh = os.open(LOCK_PATH, os.O_CREAT | os.O_EXCL | os.O_RDWR)
    except EnvironmentError as err:
        if err.errno != 13:  # 13 == EACCES: file held by the other instance
            raise
        self.is_running = True
def open(self):
    """Open the shelve-backed cache, degrading to read-only when locked.

    Takes an exclusive non-blocking lock on ``<CACHE_PATH>.lock``; if
    another process holds it, the cache is opened read-only instead.
    """
    if self.opened:
        return
    self.lock = open(SETTINGS.CACHE_PATH + '.lock', 'ab')
    try:
        fcntl.flock(self.lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
        mode = 'c'  # read/write, create if needed
    except IOError:
        logger.warn("Cache locked, using read-only")
        mode = 'r'
        self.lock.close()
        self.lock = None
    try:
        self.storage = shelve.open(SETTINGS.CACHE_PATH, mode)
    except Exception as e:
        if mode != 'c':
            # Read-only open failed and we don't hold the lock: give up.
            raise
        logger.warn("Dropping corrupted cache on %s", e)
        # NOTE(review): this truncates the .lock file, not the cache
        # file itself -- confirm this actually recovers a corrupt cache.
        self.lock.truncate(0)
        self.storage = shelve.open(SETTINGS.CACHE_PATH, mode)
    self.opened = True
def lock_method(lock_filename):
    '''
    Use an OS lock such that a method can only be called once at a time.
    Exits the process when *lock_filename* is already locked elsewhere.
    '''
    def decorator(func):
        @functools.wraps(func)
        def lock_and_run_method(*args, **kwargs):
            # Only run this program if it's not already running
            # Snippet based on
            # http://linux.byexamples.com/archives/494/how-can-i-avoid-running-a-python-script-multiple-times-implement-file-locking/
            lock_fp = open(lock_filename, 'w')
            try:
                fcntl.lockf(lock_fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                raise SystemExit(
                    "This program is already running. Please stop the current process or "
                    + "remove " + lock_filename + " to run this script."
                )
            return func(*args, **kwargs)
        return lock_and_run_method
    return decorator
def store(proxys):
    """Append proxy entries to the shared proxy file.

    :param proxys: a single proxy string or a list of proxy strings
    Locks the file exclusively (non-blocking) before writing; on
    contention, retries up to 10 times with a 3-second back-off.
    """
    pidfile = open(proxyFilePath, "a")
    for _attempt in range(10):
        try:
            # LOCK_EX: exclusive lock; LOCK_NB: fail immediately instead
            # of blocking when another process holds the lock.
            fcntl.flock(pidfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
            if isinstance(proxys, list):
                for proxy in proxys:
                    pidfile.write(proxy + '\n')
            else:
                pidfile.write(proxys + '\n')
            pidfile.close()
            break
        except (IOError, OSError):
            # Another instance holds the lock; back off and retry.
            # (Was a bare "except:", which also swallowed programming
            # errors and mojibake comments documented the flags.)
            time.sleep(3)
def push(self, message):
    """Push a new message onto the queue file.

    In 'drop' overflow mode the message is silently discarded once a
    cap is reached; in 'slide' mode the oldest entries are evicted
    after the write until the queue is back under its cap.
    """
    if self.overflow_mode == 'drop':
        if self.max_length > -1 and self.get_length() >= self.max_length:
            return
        if self.max_size > -1 and self.get_size() >= self.max_size:
            return
    with open(self.file_path, 'a') as fd:
        # Let's hold an exclusive lock while appending the record.
        fcntl.flock(fd, fcntl.LOCK_EX)
        fd.write(message.serialize())
        fcntl.flock(fd, fcntl.LOCK_UN)
        fd.close()  # NOTE(review): redundant -- the with-block closes fd
    if self.overflow_mode == 'slide':
        # Only one cap may be active in slide mode; evict until under it.
        if self.max_size == -1 and self.max_length > -1:
            while self.get_length() > self.max_length:
                self.shift()
        elif self.max_size > -1 and self.max_length == -1:
            while self.get_size() > self.max_size:
                self.shift()
def init_list(self, data=None):
    """
    Initialize asid_list file (no-op if it already exists).

    :param data: list or a string; a list contributes one target per
        element, anything else counts as a single target
    :return:
    """
    file_path = os.path.join(running_path, '{sid}_list'.format(sid=self.sid))
    if os.path.exists(file_path):
        return
    # The two original branches differed only in this count.
    total = len(data) if isinstance(data, list) else 1
    with open(file_path, 'w') as f:
        # Exclusive lock so concurrent scans don't interleave writes.
        fcntl.flock(f, fcntl.LOCK_EX)
        f.write(json.dumps({
            'sids': {},
            'total_target_num': total,
        }))
def list(self, data=None):
    """
    Update asid_list file.
    :param data: tuple (s_sid, target); None reads the current state
    :return: parsed dict when *data* is None, otherwise None
    """
    file_path = os.path.join(running_path, '{sid}_list'.format(sid=self.sid))
    if data is None:
        # Read path: return the stored JSON document.
        with open(file_path, 'r') as f:
            fcntl.flock(f, fcntl.LOCK_EX)
            result = f.readline()
            return json.loads(result)
    else:
        with open(file_path, 'r+') as f:  # w+ causes a file reading bug
            fcntl.flock(f, fcntl.LOCK_EX)
            result = f.read()
            if result == '':
                result = {'sids': {}}
            else:
                result = json.loads(result)
            # Record the new (s_sid, target) pair and rewrite in place.
            result['sids'][data[0]] = data[1]
            f.seek(0)
            f.truncate()
            f.write(json.dumps(result))
def lock_path(path, timeout=0):
    """Exclusively lock *path*, retrying until *timeout* seconds elapse.

    Returns a zero-argument callable that releases the lock and closes
    the descriptor.  Raises LockError once the deadline has passed.
    """
    fd = os.open(path, os.O_CREAT)
    # Keep the lock fd from leaking into child processes.
    fcntl.fcntl(fd, fcntl.F_SETFD,
                fcntl.fcntl(fd, fcntl.F_GETFD, 0) | fcntl.FD_CLOEXEC)
    begun = time.time()
    while True:
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            if begun < time.time() - timeout:
                raise LockError("Couldn't obtain lock")
        else:
            break
        time.sleep(0.1)

    def unlock_path():
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)

    return unlock_path
def cache(self,name,suffix=''): """Allocate a name in cache, if cache was setup also lock the file , so that another process have to wait before using the same file name Important: call unlock() on result """ #TODO: something more clever here? fname='' if self.work_dir is not None: fname=self.cache_dir+os.sep+name+suffix lock_name=fname+'.lock' f=self._locks[lock_name]=open(lock_name, 'a') fcntl.lockf(f.fileno(), fcntl.LOCK_EX ) else: fname=self.tmp(name+suffix) return fname
def edit_hosts():
    """Regenerate /etc/hosts from the IPs stored under /hosts in etcd.

    Stages the file at /tmp/hosts under an exclusive lock, numbering
    entries ``cluster-N`` in etcd's sorted order, then installs it.
    """
    f = os.popen('/usr/local/bin/etcdctl ls --sort --recursive /hosts')
    hosts_str = f.read()
    hosts_arr = hosts_str.strip('\n').split('\n')
    hosts_fd = open('/tmp/hosts', 'w')
    # Lock the staging file so concurrent runs don't interleave writes.
    fcntl.flock(hosts_fd.fileno(), fcntl.LOCK_EX)
    hosts_fd.write('127.0.0.1 localhost cluster' + '\n')
    i = 0
    for host_ip in hosts_arr:
        # Keep only the key's basename (the IP itself).
        host_ip = host_ip[host_ip.rfind('/') + 1:]
        if host_ip[0] == '0':
            # Leading '0' is stripped -- presumably a key-padding
            # convention in etcd; TODO confirm against the writer side.
            hosts_fd.write(host_ip[1:] + ' cluster-' + str(i) + '\n')
        else:
            hosts_fd.write(host_ip + ' cluster-' + str(i) + '\n')
        i += 1
    hosts_fd.flush()
    # Install the generated file over /etc/hosts.
    os.system('/bin/cp /tmp/hosts /etc/hosts')
    hosts_fd.close()
def write_pid(path):
    """Writes our PID to *path*.

    NOTE(review): uses ``unicode`` -- Python 2 only.  The flock is
    released as soon as the with-block closes the file on return.
    """
    try:
        pid = os.getpid()
        with io.open(path, mode='w', encoding='utf-8') as pidfile:
            # Get a non-blocking exclusive lock
            fcntl.flock(pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            pidfile.seek(0)
            pidfile.truncate(0)
            pidfile.write(unicode(pid))
    except:
        logging.error(_("Could not write PID file: %s") % path)
        raise  # This raises the original exception
    finally:
        try:
            # Already closed by the with-block; harmless double-close.
            pidfile.close()
        except:
            pass
def isSingleInstance(flavor_id=""):
    """Return True when this is the only running instance.

    Derives a lock file name in the temp directory from the script path
    plus *flavor_id*; the handle is kept in the global ``fp`` so the
    lock lives for the whole process lifetime.
    """
    global fp
    basename = os.path.splitext(os.path.abspath(sys.argv[0]))[0].replace(
        "/", "-").replace(":", "").replace("\\", "-") + '-%s' % flavor_id + '.lock'
    lockfile = os.path.normpath(tempfile.gettempdir() + '/' + basename)

    if sys.platform == 'win32':
        # Windows: exclusive file creation acts as the lock.
        try:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
            fp = os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            return True
        except OSError:
            return False

    # POSIX: non-blocking exclusive fcntl lock.
    fp = open(lockfile, 'w')
    try:
        fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        return False
    return True
def acquire(self):
    """Busy-wait until the system-wide I2C mutex file lock is acquired."""
    if self.mutex_debug:
        print("I2C mutex acquire")
    acquired = False
    while not acquired:
        try:
            self.DexterLockI2C_handle = open('/run/lock/DexterLockI2C', 'w')
            # lock (non-blocking, so we can sleep briefly and retry)
            fcntl.lockf(self.DexterLockI2C_handle,
                        fcntl.LOCK_EX | fcntl.LOCK_NB)
            acquired = True
        except IOError:
            # already locked by a different process
            time.sleep(0.001)
        except Exception as e:
            # Unexpected failure: report it but keep trying.
            print(e)
    if self.mutex_debug:
        print("I2C mutex acquired {}".format(time.time()))
def lock(self):
    """
    Locks the package to avoid concurrent operations on its shared
    resources. Currently, the only resource shared among scripts
    executed from different directories is the repository.
    """
    if not self.locking_enabled:
        LOG.debug("This package has no shared resources to lock")
        return
    import errno  # local import keeps this block self-contained
    LOG.debug("Checking for lock on file {}.".format(self.lock_file_path))
    self.lock_file = open(self.lock_file_path, "w")
    try:
        fcntl.lockf(self.lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as exc:
        # Use errno.EAGAIN instead of the hard-coded Linux value 11.
        if exc.errno == errno.EAGAIN:
            LOG.info("Waiting for other process to finish operations "
                     "on {}.".format(self.name))
        else:
            raise
    # Blocking acquire; a no-op when the non-blocking attempt succeeded.
    fcntl.lockf(self.lock_file, fcntl.LOCK_EX)
def write_data(self):
    """Persist clouds/hosts/history state to the YAML config file.

    Skips the write (re-reading instead) when the on-disk config is
    newer than the in-memory data.  Returns True on success, False on
    a stale write attempt or any error.
    """
    if self.config_newer_than_data():
        self.read_data()
        return False
    try:
        self.data = {"clouds": self.quads.clouds.data,
                     "hosts": self.quads.hosts.data,
                     "history": self.quads.history.data,
                     "cloud_history": self.quads.cloud_history.data}
        with open(self.config, 'w') as yaml_file:
            # Exclusive, non-blocking lock while the file is rewritten.
            fcntl.flock(yaml_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
            yaml_file.write(yaml.dump(self.data, default_flow_style=False))
            fcntl.flock(yaml_file, fcntl.LOCK_UN)
        self.read_data()
        return True
    except Exception as ex:  # was Python-2-only "except Exception, ex"
        self.logger.error("There was a problem with your file %s" % ex)
        return False
def _try_lock():
    """Check and create lock file - prevent running application twice.

    Return lock file handler, or None when another instance is running
    or locking failed.
    """
    lock_file_path = _find_config_file("app.lock", False)
    _check_dir_for_file(lock_file_path)
    try:
        if fcntl is not None:
            # POSIX: take a non-blocking exclusive lock on the file.
            lock_file = open(lock_file_path, "w")
            fcntl.lockf(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        else:
            # No fcntl (e.g. Windows): fall back to lock-file existence.
            if os.path.isfile(lock_file_path):
                _LOG.error("another instance detected (lock file exists) "
                           "- exiting")
                return None
            lock_file = open(lock_file_path, "w")
        return lock_file
    except IOError as err:
        import errno
        if err.errno == errno.EAGAIN:
            _LOG.error("another instance detected - exiting")
        else:
            _LOG.exception("locking failed: %s", err)
        return None
def getLock(self, lock_path, force=False, timeout=30, filename="easyops.lock"):
    """Acquire an exclusive lock file, polling once per second.

    :param force: when True, return True immediately on contention
        WITHOUT actually holding the lock
    :param timeout: number of one-second polls before giving up
    :return: True when locked (or forced), False on timeout
    """
    import fcntl
    lockFile = os.path.join(lock_path, filename)
    # fp = open(lockFile,'w')
    try:
        # Best effort: make the lock file usable by any account.
        if os.path.isfile(lockFile):
            os.chmod(lockFile, 0o777)
    except:
        pass
    # Keep the handle on self so the lock lives as long as the object.
    self.fp[lockFile] = open(lockFile, 'w')
    count = 0
    while True:
        if count > timeout:
            return False
        count += 1
        try:
            fcntl.flock(self.fp[lockFile], fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            if force == True:
                # NOTE(review): reports success without holding the lock.
                return True
            gevent.sleep(1)
        else:
            return True
def _lock(fileno): """Try to lock a file. Return True on success.""" # closing the file unlocks it, so we don't need to unlock here if platform.system() == 'Windows': try: msvcrt.locking(fileno, msvcrt.LK_NBLCK, 10) return True except PermissionError: return False else: try: fcntl.lockf(fileno, fcntl.LOCK_EX | fcntl.LOCK_NB) return True # the docs recommend catching both of these except (BlockingIOError, PermissionError): return False
def config_cache_lock():
    """
    Obtain the config cache lock, perform the code in the context and then
    let the lock go.

    :yield None
    :return None:
    """
    with open(conf().CONFIG_CACHE_LOCK, 'w+') as lock:
        try:
            log.debug(
                "Getting config cache lock. "
                "If this blocks forever, try deleting file "
                "{} and restart the process.".format(conf().CONFIG_CACHE_LOCK)
            )
            flock(lock, LOCK_EX)  # Blocks until lock becomes available
            yield
        finally:
            # Always release, even if the context body raised.
            log.debug("Releasing the config cache lock")
            flock(lock, LOCK_UN)
def is_tor_data_dir_usable(tor_data_dir):
    """
    Checks if the Tor data dir specified is usable. This means that it is
    not being locked and we have permissions to write to it.

    Returns True when the directory does not yet exist (Tor will create
    it) or its lock file can be locked; False otherwise.
    """
    if not os.path.exists(tor_data_dir):
        return True
    try:
        # Context manager so the probe file handle is not leaked
        # (the original left it open forever).
        with open(os.path.join(tor_data_dir, 'lock'), 'w') as lock_file:
            fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        return True
    except (IOError, OSError) as err:
        if err.errno == errno.EACCES:
            # Permission error
            return False
        elif err.errno == errno.EAGAIN:
            # File locked
            return False
        # Unexpected errno: the original fell through returning an
        # implicit None; make "unusable" explicit instead.
        return False
def update(path):
    """ allow concurrent update of metadata """
    p = os.path.join(path, "metadata.json")
    # we have to open writeable to get a lock
    with open(p, "a") as f:
        # Blocking exclusive lock: serializes read-modify-write cycles.
        fcntl.lockf(f, fcntl.LOCK_EX)
        data = load(path)
        # Hand the parsed metadata to the caller for in-place mutation,
        # then persist it before releasing the lock.
        yield(data)
        save(path, data)
        fcntl.lockf(f, fcntl.LOCK_UN)
def main():
    """Export Ceph RGW metrics to stdout, guarded by a single-instance lock."""
    exit_status = 1
    try:
        args = parse_args()
        # Make sure the exporter is only running once.
        lock_file = '/var/lock/{}.lock'.format(os.path.basename(sys.argv[0]))
        lock_fd = os.open(lock_file, os.O_CREAT)
        lock_success = False
        try:
            fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            lock_success = True
        except IOError:
            msg = 'Failed to export metrics, another instance is running.'
            syslog.syslog(syslog.LOG_INFO, msg)
            sys.stderr.write(msg + '\n')
        if lock_success:
            # Create a new registry, otherwise unwanted default collectors are
            # added automatically.
            registry = prometheus_client.CollectorRegistry()
            # Register our own collector and write metrics to STDOUT.
            registry.register(CephRgwCollector(**vars(args)))
            sys.stdout.write(prometheus_client.generate_latest(registry))
            sys.stdout.flush()
            # Unlock the lock file.
            fcntl.flock(lock_fd, fcntl.LOCK_UN)
            exit_status = 0
    except Exception as e:
        syslog.syslog(syslog.LOG_ERR, str(e))
    # Cleanup
    # NOTE(review): lock_fd/lock_success are unbound here if parse_args()
    # or os.open() raised above -- confirm this path cannot be reached.
    os.close(lock_fd)
    if lock_success:
        try:
            os.unlink(lock_file)
        except:
            pass
    sys.exit(exit_status)
def _lockonly(file):
    """Try a non-blocking exclusive flock on *file*; True on success."""
    _msg('got file #', file.fileno())
    try:
        flock(file, LOCK_EX | LOCK_NB)
    except IOError:
        # Another process holds the lock.
        _msg('failed to lock')
        return False
    else:
        _msg('locked successfully')
        return True
def lock (self):
    '''
    Creates and holds on to the lock file with exclusive access.
    Returns True if lock successful, False if it is not, and raises
    an exception upon operating system errors encountered creating the
    lock file.  (Python 2 only: uses "except ..., e" syntax and writes
    a str to the raw fd.)
    '''
    try:
        #
        # Create or else open and trucate lock file, in read-write mode.
        #
        # A crashed app might not delete the lock file, so the
        # os.O_CREAT | os.O_EXCL combination that guarantees
        # atomic create isn't useful here. That is, we don't want to
        # fail locking just because the file exists.
        #
        # Could use os.O_EXLOCK, but that doesn't exist yet in my Python
        #
        self.lockfd = os.open (self.lockfile,
                               os.O_TRUNC | os.O_CREAT | os.O_RDWR)

        # Acquire exclusive lock on the file, but don't block waiting for it
        fcntl.flock (self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)

        # Writing to file is pointless, nobody can see it
        os.write (self.lockfd, "My Lockfile")

        return True
    except (OSError, IOError), e:
        # Lock cannot be acquired is okay, everything else reraise exception
        if e.errno in (errno.EACCES, errno.EAGAIN):
            return False
        else:
            raise
def __enter__(self):
    """Lock the pidfile exclusively and write our PID into it.

    Raises SystemExit when another process already holds the lock;
    returns the open pidfile handle otherwise.
    """
    # "a+" creates without truncating an existing PID before we lock.
    self.pidfile = open(self.path, "a+")
    try:
        fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        raise SystemExit("Already running according to " + self.path)
    # Lock held: replace stale content with the current PID.
    self.pidfile.seek(0)
    self.pidfile.truncate()
    self.pidfile.write(str(os.getpid()))
    self.pidfile.flush()
    self.pidfile.seek(0)
    return self.pidfile
def __init__(self, mutex_name):
    """Acquire a system-wide mutex backed by a locked temp file.

    Sets ``self._acquired`` to indicate success; release happens via a
    weakref finalizer rather than ``__del__``.
    """
    check_valid_mutex_name(mutex_name)
    filename = os.path.join(tempfile.gettempdir(), mutex_name)
    try:
        handle = open(filename, 'w')
        fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except:
        # Could not lock: another process owns the mutex.
        self._release_mutex = NULL
        self._acquired = False
        try:
            handle.close()
        except:
            pass
    else:
        def release_mutex(*args, **kwargs):
            # Note: can't use self here!
            if not getattr(release_mutex, 'called', False):
                release_mutex.called = True
                try:
                    fcntl.flock(handle, fcntl.LOCK_UN)
                except:
                    traceback.print_exc()
                try:
                    handle.close()
                except:
                    traceback.print_exc()
                try:
                    # Removing is pretty much optional (but let's do it to keep the
                    # filesystem cleaner).
                    os.unlink(filename)
                except:
                    pass

        # Don't use __del__: this approach doesn't have as many pitfalls.
        self._ref = weakref.ref(self, release_mutex)
        self._release_mutex = release_mutex
        self._acquired = True
def write_pid_file(pid_file, pid):
    """Record *pid* in *pid_file* under an exclusive lock.

    Returns 0 on success, -1 when the file cannot be opened or another
    instance already holds the lock.  The fd is deliberately left open
    so the lock persists for the process lifetime.
    """
    import fcntl
    import stat

    try:
        fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
                     stat.S_IRUSR | stat.S_IWUSR)
    except OSError as e:
        shell.print_exception(e)
        return -1
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert flags != -1
    # Close the pid fd automatically in exec'd children.
    flags |= fcntl.FD_CLOEXEC
    r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    assert r != -1
    # There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
    # via fcntl.fcntl. So use lockf instead
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
    except IOError:
        # Lock held: report the PID of the running instance if readable.
        r = os.read(fd, 32)
        if r:
            logging.error('already started at pid %s' % common.to_str(r))
        else:
            logging.error('already started')
        os.close(fd)
        return -1
    os.ftruncate(fd, 0)
    os.write(fd, common.to_bytes(str(pid)))
    return 0
def trylock_or_exit(self, timeout=10):
    """Try to lock ``self.lockfile`` for up to *timeout* seconds, else exit.

    Polls every 0.1 s with a non-blocking exclusive lock; on success the
    open handle is kept in ``self.lockfp`` so the lock stays held.
    """
    import errno  # local import keeps this block self-contained
    interval = 0.1
    n = int(timeout / interval) + 1
    flag = fcntl.LOCK_EX | fcntl.LOCK_NB
    for _ in range(n):
        fd = os.open(self.lockfile, os.O_RDWR | os.O_CREAT)
        # Don't leak the lock fd into children spawned later.
        fcntl.fcntl(fd, fcntl.F_SETFD,
                    fcntl.fcntl(fd, fcntl.F_GETFD, 0) | fcntl.FD_CLOEXEC)
        try:
            fcntl.lockf(fd, flag)
            # NOTE(review): 'w+r' is a Python-2-only mode string.
            self.lockfp = os.fdopen(fd, 'w+r')
            break
        except IOError as e:
            os.close(fd)
            # e.errno works on Python 2 and 3; the original e[0] is
            # Python-2-only (exceptions are not indexable on Python 3).
            if e.errno == errno.EAGAIN:
                time.sleep(interval)
            else:
                raise
    else:
        logger.info("Failure acquiring lock %s" % (self.lockfile, ))
        sys.exit(1)
    logger.info("OK acquired lock %s" % (self.lockfile))
def lock_file_nonblocking(fileobj):
    """Try to take an exclusive non-blocking lock on *fileobj*.

    Returns True on success, False when another process holds the lock;
    unexpected I/O errors are re-raised.
    """
    # Use fcntl.flock instead of fcntl.lockf. lockf on pypy 1.7 seems
    # to ignore existing locks.
    try:
        fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as ioe:  # was Python-2-only "except IOError, ioe"
        if ioe.errno not in (errno.EACCES, errno.EAGAIN):
            raise
        return False
    return True
def lockfile(filename):
    """Context-manager generator: yield True while holding an exclusive
    non-blocking lock on *filename*, or False when it is already taken."""
    with open(filename, "wb") as handle:
        descriptor = handle.fileno()
        locked = True
        try:
            fcntl.flock(descriptor, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as err:
            # Contention is reported as False; anything else is real.
            if err.errno not in (errno.EACCES, errno.EAGAIN):
                raise
            locked = False
        if not locked:
            yield False
        else:
            try:
                yield True
            finally:
                fcntl.flock(descriptor, fcntl.LOCK_UN)
def acquire(self):
    """Take a non-blocking exclusive lock on ``self.lock_file``.

    Returns the open lock handle on success; on failure, closes the
    handle (if opened) and raises OperationError.
    """
    try:
        self._lock = open(self.lock_file, "w")
        flock(self._lock, LOCK_EX | LOCK_NB)
        logging.debug("Acquired exclusive lock on file: %s" % self.lock_file)
        return self._lock
    except Exception:
        logging.debug("Error acquiring lock on file: %s" % self.lock_file)
        if self._lock:
            self._lock.close()
        raise OperationError("Could not acquire lock on file: %s!" % self.lock_file)
def trylock(self):
    """Non-blocking exclusive lock on ``self.lockfile``; raises IOError
    immediately when another process already holds it."""
    fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def __init__(self):
    """Detect an already-running instance via a lock on LOCK_PATH.

    Sets ``self.is_running`` when another process holds the lock; keeps
    the handle in ``self.fh`` so our lock persists.
    """
    self.fh = None
    self.is_running = False
    try:
        self.fh = open(LOCK_PATH, 'w')
        fcntl.lockf(self.fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except EnvironmentError as err:
        if self.fh is not None:
            # Open succeeded but locking failed: someone else has it.
            self.is_running = True
        else:
            # Could not even open the lock file: a real error.
            raise
def _lock_file(f, exclusive):
    """Block until *f* is locked: exclusively when *exclusive* is true,
    otherwise with a shared (reader) lock."""
    fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def latent_plan(init,goal,mode): bits = np.concatenate((init,goal)) ###### preprocessing ################################################################ ## old code for caching... lock = problem(network("lock")) import fcntl try: with open(lock) as f: print("lockfile found!") fcntl.flock(f, fcntl.LOCK_SH) except FileNotFoundError: with open(lock,'wb') as f: fcntl.flock(f, fcntl.LOCK_EX) preprocess(bits) ###### do planning ############################################# sasp = problem(network("{}.sasp".format(action_type))) plan_raw = problem(network("{}.sasp.plan".format(action_type))) plan = problem(network("{}.{}.plan".format(action_type,mode))) echodo(["planner-scripts/limit.sh","-v", "-o",options[mode], "--","fd-sas-clean", sasp]) assert os.path.exists(plan_raw) echodo(["mv",plan_raw,plan]) out = echo_out(["lisp/parse-plan.bin",plan, *list(init.astype('str'))]) lines = out.splitlines() return np.array([ [ int(s) for s in l.split() ] for l in lines ])
def openLocked(path, mode="w"):
    """Open *path* and, on POSIX, take an exclusive non-blocking lock.

    On non-POSIX platforms the file is simply opened unlocked.
    """
    handle = open(path, mode)
    if os.name == "posix":
        import fcntl
        fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    return handle
def __init__(self, fd, keys, excl=True, lock=False, mod=1048573,
             _ctx_cleanup=None):
    """Prepare a multi-key advisory lock over file *fd*.

    Each key is hashed into one of *mod* slots; the slot set is sorted
    so every process acquires locks in the same order.
    """
    self.fd = fd
    self.locked = False
    # Exclusive vs shared mode applied to all keys.
    self.mode = fcntl.LOCK_EX if excl else fcntl.LOCK_SH
    self._ctx_cleanup = _ctx_cleanup
    # sort so locks are acquired in consistent order
    # guarantees no inter-process deadlocks
    locks = set(hash(key) % mod for key in keys)
    self.locks = tuple(sorted(locks))
    if lock:
        self.lock()
def exclusive(self, *keys):
    """Re-apply the held locks in exclusive mode.

    NOTE(review): *keys* is accepted but never used -- confirm whether
    per-key escalation was intended.
    """
    self._update(fcntl.LOCK_EX)
def write_pid_file(pid_file, pid):
    """Record *pid* in *pid_file* under an exclusive non-blocking lock.

    Returns 0 on success, -1 when the file cannot be opened or another
    instance already holds the lock.  The fd is deliberately left open
    so the lock persists for the process lifetime.
    """
    try:
        fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
                     stat.S_IRUSR | stat.S_IWUSR)
    except OSError as e:
        LOG.exception(e)
        return -1
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert flags != -1
    # Keep the locked fd out of exec'd child processes.
    flags |= fcntl.FD_CLOEXEC
    r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    assert r != -1
    # There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
    # via fcntl.fcntl. So use lockf instead
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
    except IOError:
        # Lock held: report the PID of the running instance if readable.
        r = os.read(fd, 32)
        if r:
            logging.error('already started at pid %s' % utils.to_str(r))
        else:
            logging.error('already started')
        os.close(fd)
        return -1
    os.ftruncate(fd, 0)
    os.write(fd, utils.to_bytes(str(pid)))
    return 0
def create_pid_file(self):
    """Write "<pid>;<timestamp>" to ``self._pid_file`` under a lock.

    Always returns True.  NOTE(review): closing the file releases the
    lock immediately -- confirm callers don't expect it to stay held.
    """
    # Text mode: the payload is built by str concatenation, so the
    # original binary mode ('wb') raises TypeError on Python 3.
    fd = open(self._pid_file, 'w')
    fcntl.lockf(fd, fcntl.LOCK_EX)
    timestamp = time.strftime('%Y-%m-%d %H:%M:%S')
    fd.write(self._pid + ";" + timestamp)
    fd.close()
    return True
def _trylock(lockfile):
    """Attempt a non-blocking exclusive lock on *lockfile*; raises
    IOError immediately when it is already locked elsewhere."""
    fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def __init__(self, filename, mode='rb'):
    """Open *filename* with an advisory lock matching *mode*.

    Readers take a shared lock; writers/appenders take an exclusive
    lock.  'w' is emulated by opening in append mode, locking, and only
    then truncating, so truncation happens once the lock is held.
    """
    self.filename = filename
    self.mode = mode
    self.file = None
    if 'r' in mode:
        self.file = open_file(filename, mode)
        lock(self.file, LOCK_SH)
    elif 'w' in mode or 'a' in mode:
        self.file = open_file(filename, mode.replace('w', 'a'))
        lock(self.file, LOCK_EX)
        if 'a' not in mode:
            # 'w' semantics: truncate only after the lock is ours.
            self.file.seek(0)
            self.file.truncate(0)
    else:
        raise RuntimeError("invalid LockedFile(...,mode)")
def acquire(self):
    """Block until an exclusive lock on ``self.handle`` is obtained."""
    fcntl.flock(self.handle, fcntl.LOCK_EX)
def _try_lock(self, fd):
    """Try to acquire the lock file without blocking.

    :param int fd: file descriptor of the opened file to lock
    :raises errors.LockError: when another Certbot process holds it
    """
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as err:
        if err.errno in (errno.EACCES, errno.EAGAIN):
            # Expected contention: translate into a domain error.
            logger.debug(
                "A lock on %s is held by another process.", self._path)
            raise errors.LockError(
                "Another instance of Certbot is already running.")
        # Anything else is a genuine I/O failure.
        raise
def run(self):
    """
    THIS IS BLOCKING! CALL IT AT LAST!

    Waits on an exclusive lock so only one job runs at a time, records
    our PID in the lock file, runs the job action, then unlocks.
    """
    with open(Job.lockFileName, 'w') as f:
        # Blocking acquire: waits for any other job to finish.
        rv = fcntl.lockf(f.fileno(), fcntl.LOCK_EX)
        print("job {} is running.".format(os.getpid()))
        f.write(str(os.getpid()) + '\n')
        f.flush()
        self.action()
        fcntl.lockf(f.fileno(), fcntl.LOCK_UN)
def write_server_info(filename, port):
    """Append this rank's "token:rank:pid:port:token" line to *filename*.

    Ranks stagger their writes (0.1 s * rank) and serialize access with
    an exclusive lock; the line is flushed and fsync'd before unlocking
    so readers only ever see complete records.  Returns the line.
    """
    pid = os.getpid()
    rank = MPI.COMM_WORLD.Get_rank()
    server_info = '{}:{}:{}:{}:{}'.format(LINE_TOKEN, rank, pid, port,
                                          LINE_TOKEN).strip()
    logger.debug("write_server_info: line %s, filename %s",
                 server_info, filename)
    # Stagger by rank to reduce lock contention.
    time.sleep(0.1 * rank)
    with open(filename, "a") as f:
        fcntl.lockf(f, fcntl.LOCK_EX)
        f.write(server_info + '\n')
        f.flush()
        os.fsync(f.fileno())
        fcntl.lockf(f, fcntl.LOCK_UN)
    return server_info