The following code examples, extracted from open-source Python projects, illustrate how to use fcntl.LOCK_SH (the shared, advisory lock flag: any number of processes may hold it at once, while fcntl.LOCK_EX grants a single process exclusive access).
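Before the project examples, here is a minimal, self-contained sketch of the pattern most of them follow: take a shared lock around a read, and optionally OR in fcntl.LOCK_NB to fail fast instead of blocking. The file name `example.txt` is only a placeholder:

```python
import fcntl

# Blocking shared lock: waits until no process holds an exclusive lock,
# then allows any number of concurrent readers.
with open("example.txt", "r") as fd:
    fcntl.flock(fd, fcntl.LOCK_SH)
    try:
        data = fd.read()
    finally:
        fcntl.flock(fd, fcntl.LOCK_UN)  # closing the file also drops the lock

# Non-blocking variant: combine with LOCK_NB and handle the failure,
# as several of the examples below do.
with open("example.txt", "r") as fd:
    try:
        fcntl.flock(fd, fcntl.LOCK_SH | fcntl.LOCK_NB)
    except OSError:
        print("file is locked for writing by another process")
```

Note that flock locks are advisory: they constrain only processes that also take the lock, not arbitrary readers and writers, which is why every cooperating code path in the examples below goes through the same lock call.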
```python
def get_content_all_messages(self):
    """ Get all messages """
    try:
        buffers = list()
        with open(self.file_path, 'r') as fd:
            fcntl.flock(fd, fcntl.LOCK_SH)
            for row_msg in fd.readlines():
                try:
                    msg = self.parse_row_message(row_msg)
                    buffers.append(json.loads(msg.content))
                except Exception:
                    pass
            fcntl.flock(fd, fcntl.LOCK_UN)
        return buffers
    except Exception:
        return
```
```python
def __init__(self, addr, conf, log, fd=None):
    if fd is None:
        try:
            st = os.stat(addr)
        except OSError as e:
            if e.args[0] != errno.ENOENT:
                raise
        else:
            if stat.S_ISSOCK(st.st_mode):
                os.remove(addr)
            else:
                raise ValueError("%r is not a socket" % addr)
    self.parent = os.getpid()
    super(UnixSocket, self).__init__(addr, conf, log, fd=fd)
    # each arbiter grabs a shared lock on the unix socket.
    fcntl.lockf(self.sock, fcntl.LOCK_SH | fcntl.LOCK_NB)
```
```python
def _lock_file(f, exclusive):
    fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
```
```python
def latent_plan(init, goal, mode):
    bits = np.concatenate((init, goal))

    ###### preprocessing ################################################################
    ## old code for caching...
    lock = problem(network("lock"))
    import fcntl
    try:
        with open(lock) as f:
            print("lockfile found!")
            fcntl.flock(f, fcntl.LOCK_SH)
    except FileNotFoundError:
        with open(lock, 'wb') as f:
            fcntl.flock(f, fcntl.LOCK_EX)
            preprocess(bits)

    ###### do planning #############################################
    sasp = problem(network("{}.sasp".format(action_type)))
    plan_raw = problem(network("{}.sasp.plan".format(action_type)))
    plan = problem(network("{}.{}.plan".format(action_type, mode)))

    echodo(["planner-scripts/limit.sh", "-v",
            "-o", options[mode],
            "--", "fd-sas-clean", sasp])
    assert os.path.exists(plan_raw)
    echodo(["mv", plan_raw, plan])

    out = echo_out(["lisp/parse-plan.bin", plan,
                    *list(init.astype('str'))])
    lines = out.splitlines()
    return np.array([[int(s) for s in l.split()] for l in lines])
```
```python
def __init__(self, fd, keys, excl=True, lock=False, mod=1048573,
             _ctx_cleanup=None):
    self.fd = fd
    self.locked = False
    self.mode = fcntl.LOCK_EX if excl else fcntl.LOCK_SH
    self._ctx_cleanup = _ctx_cleanup
    # sort so locks are acquired in consistent order
    # guarantees no inter-process deadlocks
    locks = set(hash(key) % mod for key in keys)
    self.locks = tuple(sorted(locks))
    if lock:
        self.lock()
```
```python
def shared(self):
    self._update(fcntl.LOCK_SH)
```
```python
def __init__(self, filename, mode='rb'):
    self.filename = filename
    self.mode = mode
    self.file = None
    if 'r' in mode:
        self.file = open_file(filename, mode)
        lock(self.file, LOCK_SH)
    elif 'w' in mode or 'a' in mode:
        self.file = open_file(filename, mode.replace('w', 'a'))
        lock(self.file, LOCK_EX)
        if 'a' not in mode:
            self.file.seek(0)
            self.file.truncate(0)
    else:
        raise RuntimeError("invalid LockedFile(...,mode)")
```
```python
def get_rpc_port_by_rank(self, rank, num_servers):
    if self.mpirun_proc is None:
        raise RuntimeError("Launch mpirun_proc before reading of rpc ports")
    if self._rpc_ports is not None:
        return self._rpc_ports[rank]

    server_info_pattern = re.compile(
        "^" + LINE_TOKEN + r":([\d]+):([\d]+):([\d]+):" + LINE_TOKEN + "$")
    self._tmpfile.seek(0)
    while True:
        fcntl.lockf(self._tmpfile, fcntl.LOCK_SH)
        line_count = sum(1 for line in self._tmpfile
                         if server_info_pattern.match(line))
        self._tmpfile.seek(0)
        fcntl.lockf(self._tmpfile, fcntl.LOCK_UN)
        if line_count == num_servers:
            break
        else:
            time.sleep(0.1)

    server_infos = [tuple([int(server_info_pattern.match(line).group(1)),
                           int(server_info_pattern.match(line).group(3))])
                    for line in self._tmpfile]
    server_infos = sorted(server_infos, key=lambda x: x[0])
    self._rpc_ports = [row[1] for row in server_infos]
    logger.debug("get_rpc_ports: ports (in MPI rank order): %s",
                 self._rpc_ports)
    self._tmpfile.close()
    return self._rpc_ports[rank]
```
```python
def _lock_file_posix(self, path, exclusive=True):
    lock_path = path + '.lock'
    if exclusive is True:
        f_lock = open(lock_path, 'w')
        fcntl.lockf(f_lock, fcntl.LOCK_EX)
    else:
        f_lock = open(lock_path, 'r')
        fcntl.lockf(f_lock, fcntl.LOCK_SH)
    if os.path.exists(lock_path) is False:
        f_lock.close()
        return None
    return f_lock
```
```python
def do_acquire_read_lock(self, wait):
    filedescriptor = self._open(os.O_CREAT | os.O_RDONLY)
    if not wait:
        try:
            fcntl.flock(filedescriptor, fcntl.LOCK_SH | fcntl.LOCK_NB)
            return True
        except IOError:
            os.close(filedescriptor)
            self._filedescriptor.remove()
            return False
    else:
        fcntl.flock(filedescriptor, fcntl.LOCK_SH)
        return True
```
```python
def __init__(self, filename, mode='rb'):
    self.filename = filename
    self.mode = mode
    self.file = None
    if 'r' in mode:
        self.file = open(filename, mode)
        lock(self.file, LOCK_SH)
    elif 'w' in mode or 'a' in mode:
        self.file = open(filename, mode.replace('w', 'a'))
        lock(self.file, LOCK_EX)
        if 'a' not in mode:
            self.file.seek(0)
            self.file.truncate(0)
    else:
        raise RuntimeError("invalid LockedFile(...,mode)")
```
```python
def get_length(self):
    """ Read the whole queue file and return the number of lines. """
    try:
        with open(self.file_path, 'r') as fd:
            fcntl.flock(fd, fcntl.LOCK_SH)
            n = 0
            for _ in fd.readlines():
                n += 1
            fcntl.flock(fd, fcntl.LOCK_UN)
        return n
    except Exception:
        return 0
```
```python
def get_size(self):
    """ Return queue file size. """
    try:
        with open(self.file_path, 'r') as fd:
            fcntl.flock(fd, fcntl.LOCK_SH)
            size = os.fstat(fd.fileno()).st_size
            fcntl.flock(fd, fcntl.LOCK_UN)
        return size
    except Exception:
        return 0
```
```python
def __init__(self, filename, mode='rb'):
    self.filename = filename
    self.mode = mode
    self.file = None
    if 'r' in mode:
        self.file = open(filename, mode)
        lock(self.file, LOCK_SH)
    elif 'w' in mode or 'a' in mode:
        self.file = open(filename, mode.replace('w', 'a'))
        lock(self.file, LOCK_EX)
        if 'a' not in mode:
            self.file.seek(0)
            self.file.truncate()
    else:
        raise RuntimeError("invalid LockedFile(...,mode)")
```
```python
def _lock_buildroot(self, exclusive):
    lock_type = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
    if not self._lock_file:
        self._open_lock()
    try:
        fcntl.lockf(self._lock_file.fileno(), lock_type | fcntl.LOCK_NB)
    except IOError:
        raise BuildRootLocked("Build root is locked by another process.")
```
```python
def lock(self, exclusive, block=False):
    lock_type = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH
    try:
        fcntl.lockf(self.lock_file.fileno(),
                    lock_type | (0 if block else fcntl.LOCK_NB))
    except IOError as e:
        if e.errno in (errno.EACCES, errno.EAGAIN):
            raise LvmLocked("LVM is locked")
        raise
```
```python
def _rootCacheLock(self, shared=1):
    lockType = fcntl.LOCK_EX
    if shared:
        lockType = fcntl.LOCK_SH
    try:
        fcntl.lockf(self.rootCacheLock.fileno(), lockType | fcntl.LOCK_NB)
    except IOError:
        self.state.start("Waiting for rootcache lock")
        fcntl.lockf(self.rootCacheLock.fileno(), lockType)
        self.state.finish("Waiting for rootcache lock")
```
```python
def start():
    if os.path.exists(settings.pid_file):
        with open(settings.pid_file) as f:
            try:
                fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
                fcntl.flock(f, fcntl.LOCK_UN)
            except IOError:
                sys.stdout.write("zmapd is already started\n")
                return
    run_daemon_process(pid_file=settings.pid_file,
                       start_msg="Start zmapd(%s)\n")
    pid_file = open(settings.pid_file)
    fcntl.flock(pid_file, fcntl.LOCK_SH)
    while True:
        time.sleep(1)
        running_jobs = Job.objects.filter(status=Job.STATUS_RUNNING)
        total_bandwidth = 0
        for job in running_jobs:
            total_bandwidth += job.bandwidth
        if total_bandwidth >= settings.max_bandwidth:
            logger.debug(u"Achieve maximum bandwidth:%sM",
                         settings.max_bandwidth)
            continue
        jobs = [x for x in
                Job.objects.filter(status=Job.STATUS_PENDING).order_by('-priority')]
        db.close_old_connections()
        for j in jobs:
            p = multiprocessing.Process(target=execute_job, args=(j.id,))
            p.start()
```
```python
def lock(self, cmd, owner, **kw):
    # return -EROFS

    # The code here is much rather just a demonstration of the locking
    # API than something which actually was seen to be useful.

    # Advisory file locking is pretty messy in Unix, and the Python
    # interface to this doesn't make it better.
    # We can't do fcntl(2)/F_GETLK from Python in a platform independent
    # way. The following implementation *might* work under Linux.
    #
    # if cmd == fcntl.F_GETLK:
    #     import struct
    #
    #     lockdata = struct.pack('hhQQi', kw['l_type'], os.SEEK_SET,
    #                            kw['l_start'], kw['l_len'], kw['l_pid'])
    #     ld2 = fcntl.fcntl(self.fd, fcntl.F_GETLK, lockdata)
    #     flockfields = ('l_type', 'l_whence', 'l_start', 'l_len', 'l_pid')
    #     uld2 = struct.unpack('hhQQi', ld2)
    #     res = {}
    #     for i in xrange(len(uld2)):
    #         res[flockfields[i]] = uld2[i]
    #
    #     return fuse.Flock(**res)

    # Convert fcntl-ish lock parameters to Python's weird
    # lockf(3)/flock(2) medley locking API...
    op = {fcntl.F_UNLCK: fcntl.LOCK_UN,
          fcntl.F_RDLCK: fcntl.LOCK_SH,
          fcntl.F_WRLCK: fcntl.LOCK_EX}[kw['l_type']]
    if cmd == fcntl.F_GETLK:
        return -EOPNOTSUPP
    elif cmd == fcntl.F_SETLK:
        if op != fcntl.LOCK_UN:
            op |= fcntl.LOCK_NB
    elif cmd == fcntl.F_SETLKW:
        pass
    else:
        return -EINVAL

    fcntl.lockf(self.fd, op, kw['l_start'], kw['l_len'])
```
```python
def flopen(*args, **kwargs):
    '''
    Shortcut for fopen with lock and context manager
    '''
    with fopen(*args, **kwargs) as fhandle:
        try:
            if is_fcntl_available(check_sunos=True):
                fcntl.flock(fhandle.fileno(), fcntl.LOCK_SH)
            yield fhandle
        finally:
            if is_fcntl_available(check_sunos=True):
                fcntl.flock(fhandle.fileno(), fcntl.LOCK_UN)
```
```python
def _acquire(self, exclusive=True, blocking=True):
    flag = fcntl.LOCK_SH
    if exclusive:
        flag = fcntl.LOCK_EX
    if not blocking:
        flag |= fcntl.LOCK_NB
    try:
        logger.debug("Acquiring lock on {}".format(self.filename))
        fcntl.flock(self.handle, flag)
    except IOError as e:
        if e.errno == errno.EAGAIN:
            raise AlreadyLocked()
    self._locked = True
    return self
```
```python
def __lock(self, *, write: bool = True):
    """
    A context manager to lock the storage for reading or writing.

    If no processes hold the lock for writing, then several processes
    can simultaneously lock the storage for reading. If a process holds
    the lock for writing, then no other process can take the lock
    (either for reading or writing). If the lock cannot be obtained,
    the process is paused until the situation allows to take it.

    Args:
        write -- (bool) if False, the storage is locked for reading.
            If True, the storage is locked for writing.
    """
    assert not self.__locked, 'Nested lock'
    if self.__lockable:
        flags = fcntl.LOCK_SH if not write else fcntl.LOCK_EX
        fcntl.lockf(self.__file, flags, 1)
        self.__locked = True
        try:
            yield
        finally:
            fcntl.lockf(self.__file, fcntl.LOCK_UN, 1)
            self.__locked = False
    else:
        yield
```
```python
def get_last_n_messages(self, n):
    """
    Generator intended to return the last n messages from the queue.
    Since the most recent records are located at the end of the file,
    we read the file backwards until the number of desired lines is
    reached or the whole file has been read. -1 means no limit.
    """
    buf_size = 8192
    try:
        with open(self.file_path, 'r') as fd:
            fcntl.flock(fd, fcntl.LOCK_SH)
            segment = None
            offset = 0
            n_line = 0
            # Move to the EOF
            fd.seek(0, os.SEEK_END)
            # Get file size using tell()
            file_size = total_size = remaining_size = fd.tell()
            while remaining_size > 0:
                offset = min(total_size, offset + buf_size)
                # Move pointer to the next position.
                fd.seek(file_size - offset)
                # Read a chunk into the buffer.
                buffer = fd.read(min(remaining_size, buf_size))
                remaining_size -= buf_size
                # Split buffer content by EOL.
                lines = buffer.split('\n')
                if segment is not None:
                    # Case when we need to concatenate the first incomplete
                    # line of the previous loop iteration with the last one
                    # of the current iteration.
                    if buffer[-1] != '\n':
                        lines[-1] += segment
                    else:
                        n_line += 1
                        if n > -1 and n_line > n:
                            fcntl.flock(fd, fcntl.LOCK_UN)
                            break
                        yield json.loads(
                            self.parse_row_message(segment).content)
                segment = lines[0]
                # Read each line.
                for idx in range(len(lines) - 1, 0, -1):
                    if len(lines[idx]):
                        n_line += 1
                        if n > -1 and n_line > n:
                            fcntl.flock(fd, fcntl.LOCK_UN)
                            break
                        yield json.loads(
                            self.parse_row_message(lines[idx]).content)
            if segment is not None:
                yield json.loads(self.parse_row_message(segment).content)
            fcntl.flock(fd, fcntl.LOCK_UN)
    except Exception:
        return
```
```python
def lockfile(name, shared=False, retry=True, block=False):
    """
    Use the specified file as a lock file, return when the lock has
    been acquired. Returns a variable to pass to unlockfile().

    Parameters:
        retry: True to re-try locking if it fails, False otherwise
        block: True to block until the lock succeeds, False otherwise

    The retry and block parameters are kind of equivalent unless you
    consider the possibility of sending a signal to the process to break
    out - at which point you want block=True rather than retry=True.
    """
    dirname = os.path.dirname(name)
    mkdirhier(dirname)

    if not os.access(dirname, os.W_OK):
        logger.error("Unable to acquire lock '%s', directory is not writable",
                     name)
        sys.exit(1)

    op = fcntl.LOCK_EX
    if shared:
        op = fcntl.LOCK_SH
    if not retry and not block:
        op = op | fcntl.LOCK_NB

    while True:
        # If we leave the lockfiles lying around there is no problem
        # but we should clean up after ourselves. This gives potential
        # for races though. To work around this, when we acquire the lock
        # we check the file we locked was still the lock file on disk
        # by comparing inode numbers. If they don't match or the lockfile
        # no longer exists, we start again.

        # This implementation is unfair since the last person to request the
        # lock is the most likely to win it.
        try:
            lf = open(name, 'a+')
            fileno = lf.fileno()
            fcntl.flock(fileno, op)
            statinfo = os.fstat(fileno)
            if os.path.exists(lf.name):
                statinfo2 = os.stat(lf.name)
                if statinfo.st_ino == statinfo2.st_ino:
                    return lf
            lf.close()
        except Exception:
            try:
                lf.close()
            except Exception:
                pass
        if not retry:
            return None
```
```python
def lockfile(name, shared=False, retry=True):
    """
    Use the file fn as a lock file, return when the lock has been acquired.
    Returns a variable to pass to unlockfile().
    """
    config.logger.debug("take lockfile %s", name)
    dirname = os.path.dirname(name)
    mkdirhier(dirname)

    if not os.access(dirname, os.W_OK):
        logger.error("Unable to acquire lock '%s', directory is not writable",
                     name)
        sys.exit(1)

    operation = fcntl.LOCK_EX
    if shared:
        operation = fcntl.LOCK_SH
    if not retry:
        operation = operation | fcntl.LOCK_NB

    while True:
        # If we leave the lockfiles lying around there is no problem
        # but we should clean up after ourselves. This gives potential
        # for races though. To work around this, when we acquire the lock
        # we check the file we locked was still the lock file on disk
        # by comparing inode numbers. If they don't match or the lockfile
        # no longer exists, we start again.

        # This implementation is unfair since the last person to request the
        # lock is the most likely to win it.

        # pylint: disable=broad-except
        # we disable the broad-except because we want to actually catch all
        # possible exceptions
        try:
            lock_file = open(name, 'a+')
            fileno = lock_file.fileno()
            fcntl.flock(fileno, operation)
            statinfo = os.fstat(fileno)
            if os.path.exists(lock_file.name):
                statinfo2 = os.stat(lock_file.name)
                if statinfo.st_ino == statinfo2.st_ino:
                    return lock_file
            lock_file.close()
        except Exception as exc:
            try:
                lock_file.close()
            except Exception as exc2:
                config.logger.error("Failed to close the lockfile: %s", exc2)
            config.logger.error("Failed to acquire the lockfile: %s", exc)
        if not retry:
            return None
```