-rwxr-xr-x  fusearchive.py | 89
1 file changed, 50 insertions(+), 39 deletions(-)
diff --git a/fusearchive.py b/fusearchive.py
index 65183b7..41d0ad9 100755
--- a/fusearchive.py
+++ b/fusearchive.py
@@ -31,6 +31,7 @@ logging.basicConfig( level = logging.DEBUG,
magic_profiling = False
enable_stats = False
enable_psyco = False
+deep_debug = False
# These control some of the file output
magic_blocksize = 1024 * 128
@@ -77,7 +78,7 @@ def _save_chunk_fs( chunk ):
return( [ 0, 0 ] )
logging.debug( "Begin save_chunk, length: " + str( len( chunk ) ) )
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+ if deep_debug:
logging.debug( "Chunk: " + str( chunk ) )
# Save this hash string, similar to the backuppc algo
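
[The deep_debug flag replaces the getEffectiveLevel() test in all four chunk
routines (fs/zip save and load) and again in write() below. A minimal sketch
of the pattern, using the module-level flag from the first hunk:

    import logging

    deep_debug = False

    def _save_chunk( chunk ):
        # str( chunk ) on a 128 KiB block is expensive, so only build the
        # dump message when deep debugging is explicitly switched on
        if deep_debug:
            logging.debug( "Chunk: " + str( chunk ) )

Unlike the logger-level check, the flag also keeps full-chunk dumps out of
ordinary DEBUG-level runs.]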
@@ -140,7 +141,7 @@ def _save_chunk_zip( chunk ):
return( [ 0, 0 ] )
logging.debug( "Begin save_chunk, length: " + str( len( chunk ) ) )
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+ if deep_debug:
logging.debug( "Chunk: " + str( chunk ) )
# Save this hash string, similar to the backuppc algo
@@ -225,7 +226,7 @@ def _load_chunk_fs( key ):
else:
raise IOError
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+ if deep_debug:
logging.debug( "Load-Chunk: " + str( chunk ) )
return chunk
@@ -257,7 +258,7 @@ def _load_chunk_zip( key ):
z.close()
raise IOError
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+ if deep_debug:
logging.debug( "Load-Chunk: " + str( chunk ) )
z.close()
@@ -275,36 +276,36 @@ class FuseArchiveStream:
class FuseArchiveSerializer:
"""This lets us experiment with different main file serializers"""
@staticmethod
- def dump( file, obj ):
- out = FuseArchiveStream.open( file, "wb" )
- FuseArchiveSerializer.dumpfh( obj, out, -1 ) # new file format
+ def dump( f, obj ):
+ out = FuseArchiveStream.open( f, "wb" )
+ FuseArchiveSerializer.dumpfh( out, obj ) # new file format
out.close()
@staticmethod
def dumpfh( fh, obj ):
fh.truncate( 0 )
- file = gzip.GzipFile( None, "wb", gzip_compress_level, fh )
- #file = fh
- cPickle.dump( obj, file, -1 ) # new file format
- file.flush()
+ f = gzip.GzipFile( None, "wb", gzip_compress_level, fh )
+ #f = fh
+ cPickle.dump( obj, f, -1 ) # new file format
+ f.flush()
@staticmethod
- def load( file ):
+ def load( f ):
if magic_profiling:
return { 'size': 0, 'chunks': 0, 'chunk_size': 0 }
- inp = FuseArchiveStream.open( file, "rb" )
- magic = FuseArchiveSerialize.loadfh( inp )
+ inp = FuseArchiveStream.open( f, "rb" )
+ magic = FuseArchiveSerializer.loadfh( inp )
inp.close()
return magic
@staticmethod
def loadfh( fh ):
fh.seek( 0 )
- file = gzip.GzipFile( None, "rb", gzip_compress_level, fh )
- #file = fh
+ f = gzip.GzipFile( None, "rb", gzip_compress_level, fh )
+ #f = fh
#pdb.set_trace()
- magic = cPickle.load( file )
+ magic = cPickle.load( f )
return( magic )
class FuseArchiveStat(fuse.Stat):
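
[A hypothetical usage sketch of the serializer, assuming an ordinary
read/write binary handle; the real dump()/load() go through
FuseArchiveStream, and the dict contents here are illustrative:

    fh = open( "/tmp/meta.tmp", "w+b" )
    FuseArchiveSerializer.dumpfh( fh, { 'size': 0, 'chunks': [], 'chunk_size': 1024 * 128 } )
    meta = FuseArchiveSerializer.loadfh( fh )   # seeks back to 0 and unpickles
    fh.close()

loadfh() returns whatever object dumpfh() pickled.]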
@@ -381,6 +382,7 @@ class FuseArchive(Fuse):
def truncate(self, path, len):
# Truncate using the ftruncate on the file
+ pdb.set_trace()
logging.debug( "Using FuseArchiveFile to truncate " + path + " to " + str(len) )
f = self.FuseArchiveFile( path, os.O_WRONLY | os.O_APPEND, 0 )
f.ftruncate(len)
@@ -476,17 +478,17 @@ class FuseArchive(Fuse):
self.chunks = []
# TODO: Better flag handling here?
- if flags | os.O_RDONLY:
+ if flags & os.O_RDONLY:
self.rd = True
- if flags | os.O_RDWR:
+ if flags & os.O_RDWR:
self.rd = True
self.wr = True
- if flags | os.O_WRONLY:
+ if flags & os.O_WRONLY:
self.wr = True
- if flags | os.O_APPEND:
+ if flags & os.O_APPEND:
self.wr = True
# TODO: handle offset -1
self.offset = -1
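
[Switching | to & fixes the tautology (flags | os.O_RDONLY is always truthy),
but one caveat remains, as an observation rather than part of the diff:
os.O_RDONLY is 0 on POSIX, so flags & os.O_RDONLY can never be true and
self.rd is only set via the O_RDWR branch. A stricter decode would mask with
os.O_ACCMODE:

    import os

    def decode_flags( flags ):
        accmode = flags & os.O_ACCMODE    # the low bits hold the access mode
        rd = accmode in ( os.O_RDONLY, os.O_RDWR )
        wr = accmode in ( os.O_WRONLY, os.O_RDWR ) or bool( flags & os.O_APPEND )
        return rd, wr]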
@@ -634,11 +636,24 @@ class FuseArchive(Fuse):
if magic_profiling:
return len( buf )
- logging.debug( "Writing to " + self.orig_path + " offset: " + str( offset ) )
+ logging.debug( "Writing to " + self.orig_path + " offset: " + str( offset ) +
+ ' (0x%x)' % offset )
index = int( offset / self.chunk_size )
rest = offset % self.chunk_size
+ # If index is higher than the number of chunks we currently have, this is
+ # a seek past a hole, so we need to extend our chunk list out. We know
+ # these chunks need to essentially be zeroed up to this size, since the
+ # hole reads back as zeros
+ while index > len( self.chunks ):
+ logging.debug( "Not enough chunks " + str( len( self.chunks ) ) + ", need " +
+ str( index ) + ", extending" )
+ self.chunk_index = -1
+ while self.chunk_index < index:
+ self._load_chunk( self.chunk_index + 1 )
+ fill_null = self.chunk_size - len(self.chunk)
+ logging.debug( "Filling this chunk with null, bytes: " + fill_null )
+ self.chunk += "\0" * fill_null
+
buf_offset = 0
buf_len = len(buf)
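
[A worked example of the new hole handling, assuming the default
magic_blocksize of 1024 * 128 (0x20000) bytes: a write at offset 0x50000
into a file holding a single short chunk gives

    index = int( 0x50000 / 0x20000 )    # == 2
    rest = 0x50000 % 0x20000            # == 0x10000

so chunk 0 is padded out to the full chunk size, chunks 1 and 2 are created
and zero-filled, and the buffer is then spliced in at offset rest within
chunk 2.]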
@@ -664,7 +679,7 @@ class FuseArchive(Fuse):
logging.debug( " chunk offset: " + str(rest) )
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+ if deep_debug:
logging.debug( "Pre-Buf: " + str(buf) )
logging.debug( "Pre-Chunk: " + str(self.chunk) )
@@ -677,7 +692,7 @@ class FuseArchive(Fuse):
buf[ buf_offset:(buf_offset+this_len) ] + \
self.chunk[ (rest + this_len): ]
- if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+ if deep_debug:
logging.debug( "Post-Buf: " + str(buf) )
logging.debug( "Post-Chunk: " + str(self.chunk) )
@@ -689,7 +704,11 @@ class FuseArchive(Fuse):
self.chunk_modified = True
self.modified = True
+ if offset + len(buf) > self.size:
+ self.size = offset + len(buf)
+
logging.debug( "Chunk size is now: " + str(len(self.chunk)) )
+ logging.debug( "File size is now: " + str(self.size) )
return len(buf)
# BUG: If you cp -a a file then quickly ls -l sometimes it doesn't show
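
[The size bookkeeping added above makes a write past EOF visible
immediately. With illustrative numbers, a 10-byte write at offset 0x40000
into a 5-byte file ends with

    if offset + len( buf ) > self.size:
        self.size = offset + len( buf )    # 0x40000 + 10 == 0x4000a

so fgetattr() below can report the new length without forcing a flush; this
appears to be what the "Fixed in write?" comment in the next hunk refers
to.]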
@@ -754,11 +773,12 @@ class FuseArchive(Fuse):
logging.debug( "Overridding fgetattr" )
stats = FuseArchiveStat( os.lstat( "./tree" + self.orig_path ) )
- if self.modified:
+ # Fixed in write?
+ #if self.modified:
# We would need to fsync here to recalc size, but don't do
# it unless modified? otherwise simple getattr will be
# rewriting a ton of files
- print "WARNING: self.modified causes fgetattr to be incorrect!"
+ # print "WARNING: self.modified causes fgetattr to be incorrect!"
stats.overstat( self.size )
return stats
@@ -786,20 +806,10 @@ class FuseArchive(Fuse):
logging.debug( "Creating 0 chunk file" )
self.chunks = []
elif need_chunks > curr_chunks:
- logging.debug( "Need to pad out file" )
-
- # Just write out null bytes to the length requested
- self.flush()
- tmp_size = self.size
- while tmp_size < length:
- to_write = 4096
- if tmp_size + to_write > length:
- to_write = length - tmp_size
-
- logging.debug( "Writing " + str( to_write ) + " bytes to extend file to " + str( length ) )
- self.write( "\0" * to_write, tmp_size + 1 )
- tmp_size += to_write
+ logging.debug( "Need to pad out file, writing/seeking to " + str( length ) )
+ # Just write out null bytes to the length requested; write() will do
+ # this for us if we specify the offset
+ self.write( '', length )
else:
logging.debug( "Truncating chunks" )
while True:
@@ -827,6 +837,7 @@ class FuseArchive(Fuse):
self.chunk = self.chunk[ :extra_bytes ]
self.modified = True
+ self.size = length
self._load_chunk( 0 )
self._fflush()
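
[With the hole-filling loop now in write(), the extend path of ftruncate
reduces to one empty write at the target offset. An illustrative trace,
assuming a requested length of 300000 bytes and the default chunk size:

    self.write( '', 300000 )
    # index = 300000 // 0x20000 == 2, so the missing chunks are created
    # and zero-filled; the empty buffer copies nothing; self.size is
    # bumped to 300000 by the new end-of-write check

The trailing self.size = length assignment is then redundant on the extend
path but still needed when truncating chunks away.]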