The following 50 code examples, extracted from open source Python projects, illustrate how to use log.Log().
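Nearly all of these call sites share one convention: log.Log(message, verbosity) takes a message string (or, in a few examples, a zero-argument callable that builds the string lazily) plus a numeric verbosity level, and the message is emitted only when the level does not exceed the configured threshold; the related log.Log.FatalError() reports an unrecoverable error and exits. The sketch below is a minimal, hypothetical stand-in that mimics this calling convention so the examples can be read in isolation; it is not the real logger from any of the quoted projects, and the _Logger class name and default threshold of 3 are invented for illustration.

import sys

class _Logger:
    """Callable logger mimicking the log.Log(message, verbosity) convention."""

    def __init__(self, threshold=3):
        self.threshold = threshold  # messages above this level are suppressed

    def __call__(self, message, verbosity):
        if verbosity <= self.threshold:
            # Some examples pass a lambda so that expensive string
            # formatting is deferred until the message is really emitted.
            if callable(message):
                message = message()
            sys.stderr.write("%s\n" % message)

    def FatalError(self, message):
        """Report an unrecoverable error and exit, like log.Log.FatalError."""
        sys.stderr.write("Fatal Error: %s\n" % message)
        sys.exit(1)

Log = _Logger()

Log("Making directory /tmp/example", 6)           # suppressed: 6 > 3
Log(lambda: "Renaming %s to %s" % ("a", "b"), 2)  # emitted: 2 <= 3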
def next(self):
    """Return next elem, add to cache.  StopIteration passed upwards"""
    next_elem = self.iter.next()
    next_index = next_elem.index
    self.cache_dict[next_index] = next_elem
    self.cache_indicies.append(next_index)

    if len(self.cache_indicies) > self.cache_size:
        try:
            del self.cache_dict[self.cache_indicies[0]]
        except KeyError:
            log.Log("Warning: index %s missing from iterator cache" %
                    (self.cache_indicies[0],), 2)
        del self.cache_indicies[0]
    return next_elem

def check_common_error(error_handler, function, args = []):
    """Apply function to args, if error, run error_handler on exception

    This uses the catch_error predicate below to only catch certain
    exceptions which seem innocent enough.

    """
    try:
        return function(*args)
    except (Exception, KeyboardInterrupt, SystemExit), exc:
        TracebackArchive.add([function] + list(args))
        if catch_error(exc):
            log.Log.exception()
            conn = Globals.backup_writer
            if conn is not None:
                conn.statistics.record_error()
            if error_handler:
                return error_handler(exc, *args)
            else:
                return None
        if is_routine_fatal(exc):
            log.Log.exception(1, 6)
        else:
            log.Log.exception(1, 2)
        raise

def copy_reg_file(rpin, rpout, compress = 0):
    """Copy regular file rpin to rpout, possibly avoiding connection"""
    try:
        if (rpout.conn is rpin.conn and
                rpout.conn is not Globals.local_connection):
            v = rpout.conn.rpath.copy_reg_file(rpin.path, rpout.path, compress)
            rpout.setdata()
            return v
    except AttributeError:
        pass
    try:
        return rpout.write_from_fileobj(rpin.open("rb"), compress = compress)
    except IOError, e:
        if (e.errno == errno.ERANGE):
            log.Log.FatalError("'IOError - Result too large' while reading %s. "
                               "If you are using a Mac, this is probably "
                               "the result of HFS+ filesystem corruption. "
                               "Please exclude this file from your backup "
                               "before proceeding." % rpin.path)
        else:
            raise

def copy_attribs(rpin, rpout):
    """Change file attributes of rpout to match rpin

    Only changes the chmoddable bits, uid/gid ownership, and
    timestamps, so both must already exist.

    """
    log.Log("Copying attributes from %s to %s" % (rpin.index, rpout.path), 7)
    assert rpin.lstat() == rpout.lstat() or rpin.isspecial()
    if Globals.change_ownership:
        rpout.chown(*rpout.conn.user_group.map_rpath(rpin))
    if Globals.eas_write:
        rpout.write_ea(rpin.get_ea())
    if rpin.issym():
        return  # symlinks don't have times or perms
    if (Globals.resource_forks_write and rpin.isreg() and
            rpin.has_resource_fork()):
        rpout.write_resource_fork(rpin.get_resource_fork())
    if (Globals.carbonfile_write and rpin.isreg() and
            rpin.has_carbonfile()):
        rpout.write_carbonfile(rpin.get_carbonfile())
    rpout.chmod(rpin.getperms())
    if Globals.acls_write:
        rpout.write_acl(rpin.get_acl())
    if not rpin.isdev():
        rpout.setmtime(rpin.getmtime())
    if Globals.win_acls_write:
        rpout.write_win_acl(rpin.get_win_acl())

def copy_attribs_inc(rpin, rpout):
    """Change file attributes of rpout to match rpin

    Like above, but used to give increments the same attributes as
    the originals.  Therefore, don't copy all directory acl and
    permissions.

    """
    log.Log("Copying inc attrs from %s to %s" % (rpin.index, rpout.path), 7)
    check_for_files(rpin, rpout)
    if Globals.change_ownership:
        apply(rpout.chown, rpin.getuidgid())
    if Globals.eas_write:
        rpout.write_ea(rpin.get_ea())
    if rpin.issym():
        return  # symlinks don't have times or perms
    if (Globals.resource_forks_write and rpin.isreg() and
            rpin.has_resource_fork() and rpout.isreg()):
        rpout.write_resource_fork(rpin.get_resource_fork())
    if (Globals.carbonfile_write and rpin.isreg() and
            rpin.has_carbonfile() and rpout.isreg()):
        rpout.write_carbonfile(rpin.get_carbonfile())
    if rpin.isdir() and not rpout.isdir():
        rpout.chmod(rpin.getperms() & 0777)
    else:
        rpout.chmod(rpin.getperms())
    if Globals.acls_write:
        rpout.write_acl(rpin.get_acl(), map_names = 0)
    if not rpin.isdev():
        rpout.setmtime(rpin.getmtime())

def chmod(self, permissions, loglevel = 2):
    """Wrapper around os.chmod"""
    try:
        self.conn.os.chmod(self.path, permissions & Globals.permission_mask)
    except OSError, e:
        if e.strerror == "Inappropriate file type or format" \
                and not self.isdir():
            # Some systems throw this error if you try to set the sticky
            # bit on a non-directory.  Remove sticky bit and try again.
            log.Log("Warning: Unable to set permissions of %s to %o - "
                    "trying again without sticky bit (%o)" %
                    (self.path, permissions, permissions & 06777), loglevel)
            self.conn.os.chmod(self.path,
                               permissions & 06777 & Globals.permission_mask)
        else:
            raise
    self.data['perms'] = permissions

def makedev(self, type, major, minor):
    """Make a special file with specified type, and major/minor nums"""
    if type == 'c':
        datatype = 'chr'
        mode = stat.S_IFCHR | 0600
    elif type == 'b':
        datatype = 'blk'
        mode = stat.S_IFBLK | 0600
    else:
        raise RPathException
    try:
        self.conn.os.mknod(self.path, mode,
                           self.conn.os.makedev(major, minor))
    except (OSError, AttributeError), e:
        if isinstance(e, AttributeError) or e.errno == errno.EPERM:
            # AttributeError will be raised by Python 2.2, which
            # doesn't have os.mknod
            log.Log("unable to mknod %s -- using touch instead" % self.path, 4)
            self.touch()
    self.setdata()

def write_carbonfile(self, cfile):
    """Write new carbon data to self."""
    if not cfile:
        return
    log.Log("Writing carbon data to %s" % (self.index,), 7)
    from Carbon.File import FSSpec
    from Carbon.File import FSRef
    import Carbon.Files
    import MacOS
    fsobj = FSSpec(self.path)
    finderinfo = fsobj.FSpGetFInfo()
    finderinfo.Creator = cfile['creator']
    finderinfo.Type = cfile['type']
    finderinfo.Location = cfile['location']
    finderinfo.Flags = cfile['flags']
    fsobj.FSpSetFInfo(finderinfo)
    # Write Creation Date to self (if stored in metadata).
    try:
        cdate = cfile['createDate']
        fsref = FSRef(fsobj)
        cataloginfo, d1, d2, d3 = fsref.FSGetCatalogInfo(
            Carbon.Files.kFSCatInfoCreateDate)
        cataloginfo.createDate = (0, cdate, 0)
        fsref.FSSetCatalogInfo(Carbon.Files.kFSCatInfoCreateDate, cataloginfo)
        self.set_carbonfile(cfile)
    except KeyError:
        self.set_carbonfile(cfile)

def Increment(new, mirror, incpref):
    """Main file incrementing function, returns inc file created

    new is the file on the active partition,
    mirror is the mirrored file from the last backup,
    incpref is the prefix of the increment file.

    This function basically moves the information about the mirror
    file to incpref.

    """
    log.Log("Incrementing mirror file " + mirror.path, 5)
    if ((new and new.isdir()) or mirror.isdir()) and not incpref.lstat():
        incpref.mkdir()

    if not mirror.lstat():
        incrp = makemissing(incpref)
    elif mirror.isdir():
        incrp = makedir(mirror, incpref)
    elif new.isreg() and mirror.isreg():
        incrp = makediff(new, mirror, incpref)
    else:
        incrp = makesnapshot(mirror, incpref)
    statistics.process_increment(incrp)
    return incrp

def filelist_globbing_get_sfs(self, filelist_fp, inc_default, list_name):
    """Return list of selection functions by reading fileobj

    filelist_fp should be an open file object
    inc_default is true iff this is an include list
    list_name is just the name of the list, used for logging
    See the man page on --[include/exclude]-globbing-filelist

    """
    log.Log("Reading globbing filelist %s" % list_name, 4)
    separator = Globals.null_separator and "\0" or "\n"
    for line in filelist_fp.read().split(separator):
        if not line:
            continue  # skip blanks
        if line[:2] == "+ ":
            yield self.glob_get_sf(line[2:], 1)
        elif line[:2] == "- ":
            yield self.glob_get_sf(line[2:], 0)
        else:
            yield self.glob_get_sf(line, inc_default)

def compare_hash(cls, repo_iter):
    """Like above, but also compare sha1 sums of any regular files"""
    def hashes_changed(src_rp, mir_rorp):
        """Return 0 if their data hashes same, 1 otherwise"""
        if not mir_rorp.has_sha1():
            log.Log("Warning: Metadata file has no digest for %s, "
                    "unable to compare." % (mir_rorp.get_indexpath(),), 2)
            return 0
        elif (src_rp.getsize() == mir_rorp.getsize() and
              hash.compute_sha1(src_rp) == mir_rorp.get_sha1()):
            return 0
        return 1

    src_iter = cls.get_source_select()
    for src_rp, mir_rorp in rorpiter.Collate2Iters(src_iter, repo_iter):
        report = get_basic_report(src_rp, mir_rorp, hashes_changed)
        if report:
            yield report
        else:
            log_success(src_rp, mir_rorp)

def compare_full(cls, src_root, repo_iter):
    """Given repo iter with full data attached, return report iter"""
    def error_handler(exc, src_rp, repo_rorp):
        log.Log("Error reading file %s" % (src_rp.path,), 2)
        return 0  # They aren't the same if we get an error

    def data_changed(src_rp, repo_rorp):
        """Return 0 if full compare of data matches, 1 otherwise"""
        if src_rp.getsize() != repo_rorp.getsize():
            return 1
        return not robust.check_common_error(error_handler, rpath.cmp,
                                             (src_rp, repo_rorp))

    for repo_rorp in repo_iter:
        src_rp = src_root.new_index(repo_rorp.index)
        report = get_basic_report(src_rp, repo_rorp, data_changed)
        if report:
            yield report
        else:
            log_success(repo_rorp)

def startBattle(self, spot, fleet, formation):
    try:
        data = self.conn.get('/pve/dealto/%d/%d/%d/' % (spot, fleet.id, formation),
                             param='', headers=self.conn.getHeader,
                             server=self.conn.getHeader.get('Host'))
        if data == -1:
            self.Log.i("Connection Error!")
            return -1
        if 'warReport' in data:
            selfHp = data['warReport']['hpBeforeNightWarSelf']
            enemyHp = data['warReport']['hpBeforeNightWarEnemy']
        else:
            selfHp = 0
            enemyHp = 0
        lastSpot = (int(data['pveLevelEnd']) == 1)
        return selfHp, enemyHp, lastSpot
    except:
        return -1

def __init__(self, tool_name, tool_version):
    self.tool_name = tool_name
    self.tool_version = tool_version
    self.terminateFlingOnException = False
    self.env = argparse.Namespace()
    self.params = argparse.Namespace()
    self.key_data = None
    self.vinfo = None
    self.log = Log(self.tool_name, self.tool_version)
    self.log.open()
    self._init_parser()

def FillInIter(rpiter, rootrp):
    """Given ordered rpiter and rootrp, fill in missing indices with rpaths

    For instance, suppose rpiter contains rpaths with indices (),
    (1,2), (2,5).  Then return iter with rpaths (), (1,), (1,2),
    (2,), (2,5).  This is used when we need to process directories
    before or after processing a file in that directory.

    """
    # Handle first element as special case
    first_rp = rpiter.next()  # StopIteration gets passed upwards
    cur_index = first_rp.index
    for i in range(len(cur_index)):
        yield rootrp.new_index(cur_index[:i])
    yield first_rp
    del first_rp
    old_index = cur_index

    # Now do all the other elements
    for rp in rpiter:
        cur_index = rp.index
        if not cur_index[:-1] == old_index[:-1]:  # Handle special case quickly
            for i in range(1, len(cur_index)):  # i==0 case already handled
                if cur_index[:i] != old_index[:i]:
                    filler_rp = rootrp.new_index(cur_index[:i])
                    if not filler_rp.isdir():
                        log.Log("Warning: expected %s to be a directory but "
                                "found %s instead.\nThis is probably caused "
                                "by a bug in versions 1.0.0 and earlier." %
                                (filler_rp.path, filler_rp.lstat()), 2)
                        filler_rp.make_zero_dir(rootrp)
                    yield filler_rp
        yield rp
        old_index = cur_index

def __call__(self, *args):
    """Process args, where args[0] is current position in iterator

    Returns true if args successfully processed, false if index is
    not in the current tree and thus the final result is
    available.

    Also note below we set self.index after doing the necessary
    start processing, in case there is a crash in the middle.

    """
    index = args[0]
    if self.index is None:
        self.root_branch.base_index = index
        if self.root_branch.can_fast_process(*args):
            self.root_branch.fast_process(*args)
            self.root_fast_processed = 1
        else:
            self.root_branch.start_process(*args)
        self.index = index
        return 1

    if index == self.index:
        log.Log("Warning, repeated index %s, bad filesystem?" % (index,), 2)
    elif index < self.index:
        assert 0, "Bad index order: %s >= %s" % (self.index, index)
    else:  # normal case
        if self.finish_branches(index) is None:
            return None  # We are no longer in the main tree
        last_branch = self.branches[-1]
        if last_branch.can_fast_process(*args):
            last_branch.fast_process(*args)
        else:
            branch = self.add_branch(index)
            branch.start_process(*args)
    self.index = index
    return 1

def get_signature(rp, blocksize = None):
    """Take signature of rpin file and return in file object"""
    if not blocksize:
        blocksize = find_blocksize(rp.getsize())
    log.Log("Getting signature of %s with blocksize %s" %
            (rp.get_indexpath(), blocksize), 7)
    return librsync.SigFile(rp.open("rb"), blocksize)

def get_delta_sigfileobj(sig_fileobj, rp_new):
    """Like get_delta but signature is in a file object"""
    log.Log("Getting delta of %s with signature stream" % (rp_new.path,), 7)
    return librsync.DeltaFile(sig_fileobj, rp_new.open("rb"))

def get_delta_sigrp_hash(rp_signature, rp_new):
    """Like above but also calculate hash of new as close() value"""
    log.Log("Getting delta (with hash) of %s with signature %s" %
            (rp_new.path, rp_signature.get_indexpath()), 7)
    return librsync.DeltaFile(rp_signature.open("rb"),
                              hash.FileWrapper(rp_new.open("rb")))

def write_delta(basis, new, delta, compress = None):
    """Write rdiff delta which brings basis to new"""
    log.Log("Writing delta %s from %s -> %s" %
            (basis.path, new.path, delta.path), 7)
    deltafile = librsync.DeltaFile(get_signature(basis), new.open("rb"))
    delta.write_from_fileobj(deltafile, compress)

def clear_rp(self, rp):
    # not sure how to interpret this
    # I'll just clear all acl-s from rp.path
    try:
        sd = rp.conn.win32security.GetNamedSecurityInfo(
            rp.path, SE_FILE_OBJECT, ACL.flags)
    except (OSError, IOError, pywintypes.error), exc:
        log.Log("Warning: unable to read ACL from %s for clearing: %s"
                % (repr(rp.path), exc), 4)
        return
    acl = sd.GetSecurityDescriptorDacl()
    if acl:
        n = acl.GetAceCount()
        # traverse the ACL in reverse, so the indices stay correct
        while n:
            n -= 1
            acl.DeleteAce(n)
        sd.SetSecurityDescriptorDacl(0, acl, 0)
    if ACL.flags & SACL_SECURITY_INFORMATION:
        acl = sd.GetSecurityDescriptorSacl()
        if acl:
            n = acl.GetAceCount()
            # traverse the ACL in reverse, so the indices stay correct
            while n:
                n -= 1
                acl.DeleteAce(n)
            sd.SetSecurityDescriptorSacl(0, acl, 0)
    try:
        rp.conn.win32security.SetNamedSecurityInfo(
            rp.path, SE_FILE_OBJECT, ACL.flags,
            sd.GetSecurityDescriptorOwner(), sd.GetSecurityDescriptorGroup(),
            sd.GetSecurityDescriptorDacl(),
            (ACL.flags & SACL_SECURITY_INFORMATION) and
            sd.GetSecurityDescriptorSacl() or None)
    except (OSError, IOError, pywintypes.error), exc:
        log.Log("Warning: unable to set ACL on %s after clearing: %s"
                % (repr(rp.path), exc), 4)

def listrp(rp):
    """Like rp.listdir() but return [] if error, and sort results"""
    def error_handler(exc):
        log.Log("Error listing directory %s" % rp.path, 2)
        return []
    dir_listing = check_common_error(error_handler, rp.listdir)
    dir_listing.sort()
    return dir_listing

def add(cls, extra_args = []):
    """Add most recent exception to archived list

    If extra_args are present, convert to strings and add them as
    extra information to same traceback archive.

    """
    cls._traceback_strings.append(log.Log.exception_to_string(extra_args))
    if len(cls._traceback_strings) > 10:
        cls._traceback_strings = cls._traceback_strings[:10]

def log(cls):
    """Print all exception information to log file"""
    if cls._traceback_strings:
        log.Log("------------ Old traceback info -----------\n%s\n"
                "-------------------------------------------" %
                ("\n".join(cls._traceback_strings),), 3)

def set_init_quote_vals_local():
    """Set value on local connection, initialize regexps"""
    global chars_to_quote, quoting_char
    chars_to_quote = Globals.chars_to_quote
    if len(Globals.quoting_char) != 1:
        log.Log.FatalError("Expected single character for quoting char, "
                           "got '%s' instead" % (Globals.quoting_char,))
    quoting_char = Globals.quoting_char
    init_quoting_regexps()

def update_quoting(rbdir):
    """Update the quoting of a repository by renaming any files
    that should be quoted differently.

    """
    def requote(name):
        unquoted_name = unquote(name)
        quoted_name = quote(unquoted_name)
        if name != quoted_name:
            return quoted_name
        else:
            return None

    def process(dirpath_rp, name, list):
        new_name = requote(name)
        if new_name:
            if list:
                list.remove(name)
                list.append(new_name)
            name_rp = dirpath_rp.append(name)
            new_rp = dirpath_rp.append(new_name)
            log.Log("Re-quoting %s to %s" % (name_rp.path, new_rp.path), 5)
            rpath.move(name_rp, new_rp)

    assert rbdir.conn is Globals.local_connection
    mirror_rp = rbdir.get_parent_rp()
    mirror = mirror_rp.path
    log.Log("Re-quoting repository %s" % mirror_rp.path, 3)
    try:
        os_walk = os.walk
    except AttributeError:
        os_walk = walk
    for dirpath, dirs, files in os_walk(mirror):
        dirpath_rp = mirror_rp.newpath(dirpath)
        for name in dirs:
            process(dirpath_rp, name, dirs)
        for name in files:
            process(dirpath_rp, name, None)

def copy(rpin, rpout, compress = 0):
    """Copy RPath rpin to rpout.  Works for symlinks, dirs, etc.

    Returns close value of input for regular file, which can be used
    to pass hashes on.

    """
    log.Log("Regular copying %s to %s" % (rpin.index, rpout.path), 6)
    if not rpin.lstat():
        if rpout.lstat():
            rpout.delete()
        return
    if rpout.lstat():
        if rpin.isreg() or not cmp(rpin, rpout):
            rpout.delete()  # easier to write than compare
        else:
            return

    if rpin.isreg():
        return copy_reg_file(rpin, rpout, compress)
    elif rpin.isdir():
        rpout.mkdir()
    elif rpin.issym():
        # some systems support permissions for symlinks, but
        # only by setting at creation via the umask
        if Globals.symlink_perms:
            orig_umask = os.umask(0777 & ~rpin.getperms())
        rpout.symlink(rpin.readlink())
        if Globals.symlink_perms:
            os.umask(orig_umask)  # restore previous umask
    elif rpin.ischardev():
        major, minor = rpin.getdevnums()
        rpout.makedev("c", major, minor)
    elif rpin.isblkdev():
        major, minor = rpin.getdevnums()
        rpout.makedev("b", major, minor)
    elif rpin.isfifo():
        rpout.mkfifo()
    elif rpin.issock():
        rpout.mksock()
    else:
        raise RPathException("File %s has unknown type" % rpin.path)

def rename(rp_source, rp_dest):
    """Rename rp_source to rp_dest"""
    assert rp_source.conn is rp_dest.conn
    log.Log(lambda: "Renaming %s to %s" % (rp_source.path, rp_dest.path), 7)
    if not rp_source.lstat():
        rp_dest.delete()
    else:
        if rp_dest.lstat() and rp_source.getinode() == rp_dest.getinode() and \
                rp_source.getinode() != 0:
            log.Log("Warning: Attempt to rename over same inode: %s to %s"
                    % (rp_source.path, rp_dest.path), 2)
            # You can't rename one hard linked file over another
            rp_source.delete()
        else:
            try:
                rp_source.conn.os.rename(rp_source.path, rp_dest.path)
            except OSError, error:
                # XXX errno.EINVAL and len(rp_dest.path) >= 260 indicates
                # pathname too long on Windows
                if error.errno != errno.EEXIST:
                    log.Log("OSError while renaming %s to %s"
                            % (rp_source.path, rp_dest.path), 1)
                    raise
                # On Windows, files can't be renamed on top of an existing file
                rp_source.conn.os.chmod(rp_dest.path, 0700)
                rp_source.conn.os.unlink(rp_dest.path)
                rp_source.conn.os.rename(rp_source.path, rp_dest.path)
        rp_dest.data = rp_source.data
        rp_source.data = {'type': None}

def equal_verbose(self, other, check_index = 1,
                  compare_inodes = 0, compare_ownership = 0,
                  compare_acls = 0, compare_eas = 0, compare_win_acls = 0,
                  compare_size = 1, compare_type = 1, verbosity = 2):
    """Like __eq__, but log more information.  Useful when testing"""
    if check_index and self.index != other.index:
        log.Log("Index %s != index %s" % (self.index, other.index), verbosity)
        return None

    for key in self.data.keys():  # compare dicts key by key
        if (key in ('uid', 'gid', 'uname', 'gname') and
                (self.issym() or not compare_ownership)):
            # Don't compare gid/uid for symlinks, or if told not to
            pass
        elif key == 'type' and not compare_type:
            pass
        elif key == 'atime' and not Globals.preserve_atime:
            pass
        elif key == 'ctime':
            pass
        elif key == 'devloc' or key == 'nlink':
            pass
        elif key == 'size' and (not self.isreg() or not compare_size):
            pass
        elif key == 'inode' and (not self.isreg() or not compare_inodes):
            pass
        elif key == 'ea' and not compare_eas:
            pass
        elif key == 'acl' and not compare_acls:
            pass
        elif key == 'win_acl' and not compare_win_acls:
            pass
        elif (not other.data.has_key(key) or
              self.data[key] != other.data[key]):
            if not other.data.has_key(key):
                log.Log("Second is missing key %s" % (key,), verbosity)
            else:
                log.Log("Value of %s differs: %s vs %s" %
                        (key, self.data[key], other.data[key]), verbosity)
            return None
    return 1

def settime(self, accesstime, modtime):
    """Change file modification times"""
    log.Log("Setting time of %s to %d" % (self.path, modtime), 7)
    try:
        self.conn.os.utime(self.path, (accesstime, modtime))
    except OverflowError:
        log.Log("Cannot change times of %s to %s - problem is probably "
                "64->32bit conversion" %
                (self.path, (accesstime, modtime)), 2)
    else:
        self.data['atime'] = accesstime
        self.data['mtime'] = modtime

def setmtime(self, modtime):
    """Set only modtime (access time to present)"""
    log.Log(lambda: "Setting time of %s to %d" % (self.path, modtime), 7)
    if modtime < 0:
        log.Log("Warning: modification time of %s is "
                "before 1970" % self.path, 2)
    try:
        self.conn.os.utime(self.path, (long(time.time()), modtime))
    except OverflowError:
        log.Log("Cannot change mtime of %s to %s - problem is probably "
                "64->32bit conversion" % (self.path, modtime), 2)
    except OSError:
        # It's not possible to set a modification time for
        # directories on Windows.
        if self.conn.os.name != 'nt' or not self.isdir():
            raise
    else:
        self.data['mtime'] = modtime

def mkdir(self):
    log.Log("Making directory " + self.path, 6)
    self.conn.os.mkdir(self.path)
    self.setdata()

def makedirs(self):
    log.Log("Making directory path " + self.path, 6)
    self.conn.os.makedirs(self.path)
    self.setdata()

def rmdir(self):
    log.Log("Removing directory " + self.path, 6)
    self.conn.os.rmdir(self.path)
    self.data = {'type': None}

def hardlink(self, linkpath):
    """Make self into a hardlink joined to linkpath"""
    log.Log("Hard linking %s to %s" % (self.path, linkpath), 6)
    self.conn.os.link(linkpath, self.path)
    self.setdata()

def touch(self):
    """Make sure file at self.path exists"""
    log.Log("Touching " + self.path, 7)
    self.conn.open(self.path, "w").close()
    self.setdata()
    assert self.isreg(), self.path

def contains_files(self):
    """Returns true if self (or subdir) contains any regular files."""
    log.Log("Determining if directory contains files: %s" % self.path, 7)
    if not self.isdir():
        return False
    dir_entries = self.listdir()
    for entry in dir_entries:
        child_rp = self.append(entry)
        if not child_rp.isdir():
            return True
        else:
            if child_rp.contains_files():
                return True
    return False

def write_from_fileobj(self, fp, compress = None):
    """Reads fp and writes to self.path.  Closes both when done

    If compress is true, fp will be gzip compressed before being
    written to self.  Returns closing value of fp.

    """
    log.Log("Writing file object to " + self.path, 7)
    assert not self.lstat(), "File %s already exists" % self.path
    outfp = self.open("wb", compress = compress)
    copyfileobj(fp, outfp)
    if outfp.close():
        raise RPathException("Error closing file")
    self.setdata()
    return fp.close()

def write_resource_fork(self, rfork_data):
    """Write new resource fork to self"""
    log.Log("Writing resource fork to %s" % (self.index,), 7)
    fp = self.conn.open(os.path.join(self.path, '..namedfork', 'rsrc'), 'wb')
    fp.write(rfork_data)
    assert not fp.close()
    self.set_resource_fork(rfork_data)

def carbonfile2string(cfile):
    """Convert CarbonFile data to a string suitable for storing."""
    if not cfile:
        return "None"
    retvalparts = []
    retvalparts.append('creator:%s' % binascii.hexlify(cfile['creator']))
    retvalparts.append('type:%s' % binascii.hexlify(cfile['type']))
    retvalparts.append('location:%d,%d' % cfile['location'])
    retvalparts.append('flags:%d' % cfile['flags'])
    try:
        retvalparts.append('createDate:%d' % cfile['createDate'])
    except KeyError:
        log.Log("Writing pre-1.1.6 style metadata, without creation date", 9)
    return '|'.join(retvalparts)

def unquote_path(quoted_string):
    """Reverse what was done by quote_path"""
    def replacement_func(match_obj):
        """Unquote match obj of two character sequence"""
        two_chars = match_obj.group(0)
        if two_chars == "\\n":
            return "\n"
        elif two_chars == "\\\\":
            return "\\"
        log.Log("Warning, unknown quoted sequence %s found" % two_chars, 2)
        return two_chars
    return re.sub("\\\\n|\\\\\\\\", replacement_func, quoted_string)

def iterate(self):
    """Return iterator that yields all objects with records"""
    for record in self.iterate_records():
        try:
            yield self.record_to_object(record)
        except (ParsingError, ValueError), e:
            if self.at_end:
                break  # Ignore whitespace/bad records at end
            log.Log("Error parsing flat file: %s" % (e,), 2)

def iterate_starting_with(self, index):
    """Iterate objects whose index starts with given index"""
    self.skip_to_index(index)
    if self.at_end:
        return
    while 1:
        next_pos = self.get_next_pos()
        try:
            obj = self.record_to_object(self.buf[:next_pos])
        except (ParsingError, ValueError), e:
            log.Log("Error parsing metadata file: %s" % (e,), 2)
        else:
            if obj.index[:len(index)] != index:
                break
            yield obj
        if self.at_end:
            break
        self.buf = self.buf[next_pos:]
    assert not self.fileobj.close()

def ConvertMetaToDiff(self):
    """Replace a mirror snapshot with a diff if it's appropriate"""
    newrp, oldrp = self.check_needs_diff()
    if not newrp:
        return
    log.Log("Writing mirror_metadata diff", 6)

    diff_writer = self.get_meta_writer('diff', oldrp.getinctime())
    new_iter = MetadataFile(newrp, 'r').get_objects()
    old_iter = MetadataFile(oldrp, 'r').get_objects()
    for diff_rorp in self.get_diffiter(new_iter, old_iter):
        diff_writer.write_object(diff_rorp)
    diff_writer.close()  # includes sync
    oldrp.delete()

def open_journal():
    """Make sure the journal dir exists (creating it if necessary)"""
    global journal_dir_rp, journal_dir_fp
    assert journal_dir_rp is journal_dir_fp is None
    journal_dir_rp = Globals.rbdir.append("journal")
    if not journal_dir_rp.lstat():
        log.Log("Creating journal directory %s" % (journal_dir_rp.path,), 5)
        journal_dir_rp.mkdir()
    assert journal_dir_rp.isdir()
    journal_dir_fp = journal_dir_rp.open("rb")

def parse_catch_error(self, exc):
    """Deal with selection error exc"""
    if isinstance(exc, FilePrefixError):
        log.Log.FatalError(
"""Fatal Error: The file specification
    '%s'
cannot match any files in the base directory
    '%s'
Useful file specifications begin with the base directory or some
pattern (such as '**') which matches the base directory.""" %
            (exc, self.prefix))
    elif isinstance(exc, GlobbingError):
        log.Log.FatalError("Fatal Error while processing expression\n"
                           "%s" % exc)
    else:
        raise

def parse_last_excludes(self):
    """Exit with error if last selection function isn't an exclude"""
    if (self.selection_functions and
            not self.selection_functions[-1].exclude):
        log.Log.FatalError(
"""Last selection expression:
    %s
only specifies that files be included.  Because the default is to
include all files, the expression is redundant.  Exiting because
this probably isn't what you meant.""" %
            (self.selection_functions[-1].name,))

def filelist_read(self, filelist_fp, include, filelist_name):
    """Read filelist from fp, return (tuplelist, something_excluded)"""
    prefix_warnings = [0]
    def incr_warnings(exc):
        """Warn if prefix is incorrect"""
        prefix_warnings[0] += 1
        if prefix_warnings[0] < 6:
            log.Log("Warning: file specification '%s' in filelist %s\n"
                    "doesn't start with correct prefix %s. Ignoring." %
                    (exc, filelist_name, self.prefix), 2)
            if prefix_warnings[0] == 5:
                log.Log("Future prefix errors will not be logged.", 2)

    something_excluded, tuple_list = None, []
    separator = Globals.null_separator and "\0" or "\n"
    for line in filelist_fp.read().split(separator):
        if not line:
            continue  # skip blanks
        try:
            tuple = self.filelist_parse_line(line, include)
        except FilePrefixError, exc:
            incr_warnings(exc)
            continue
        tuple_list.append(tuple)
        if not tuple[1]:
            something_excluded = 1
    if filelist_fp.close():
        log.Log("Error closing filelist %s" % filelist_name, 2)
    return (tuple_list, something_excluded)

def regexp_get_sf(self, regexp_string, include):
    """Return selection function given by regexp_string"""
    assert include == 0 or include == 1
    try:
        regexp = re.compile(regexp_string)
    except:
        log.Log("Error compiling regular expression %s" % regexp_string, 1)
        raise

    def sel_func(rp):
        if regexp.search(rp.path):
            return include
        else:
            return None

    sel_func.exclude = not include
    sel_func.name = "Regular expression: %s" % regexp_string
    return sel_func

def get_next_free():
    """Return next free filename available in the long filename directory"""
    global free_name_counter
    def scan_next_free():
        """Return value of free_name_counter by listing long filename dir"""
        log.Log("Setting next free from long filenames dir", 5)
        cur_high = 0
        for filename in get_long_rp().listdir():
            try:
                i = int(filename.split('.')[0])
            except ValueError:
                continue
            if i > cur_high:
                cur_high = i
        return cur_high + 1

    def read_next_free():
        """Return next int free by reading the next_free file, or None"""
        rp = get_long_rp(counter_filename)
        if not rp.lstat():
            return None
        return int(rp.get_data())

    def write_next_free(i):
        """Write value i into the counter file"""
        rp = get_long_rp(counter_filename)
        if rp.lstat():
            rp.delete()
        rp.write_string(str(free_name_counter))
        rp.fsync_with_dir()

    if not free_name_counter:
        free_name_counter = read_next_free()
    if not free_name_counter:
        free_name_counter = scan_next_free()
    filename = str(free_name_counter)
    rp = get_long_rp(filename)
    assert not rp.lstat(), "Unexpected file at %s found" % (rp.path,)
    free_name_counter += 1
    write_next_free(free_name_counter)
    return filename