path: root/fusearchive.py
author      Steve Slaven <bpk@hoopajoo.net>  2009-07-29 17:50:52 (GMT)
committer   Steve Slaven <bpk@hoopajoo.net>  2009-07-29 17:50:52 (GMT)
commit      423e41caee1a41257a147340e40897d52baf8a77 (patch)
tree        3346fd40a4b69b4bd12d1cf4db575c904021c22c /fusearchive.py
parent      6eb7e7dff7011bd5bcccab1d20204cdebe4952e5 (diff)
download    fusearchive-423e41caee1a41257a147340e40897d52baf8a77.zip
            fusearchive-423e41caee1a41257a147340e40897d52baf8a77.tar.gz
            fusearchive-423e41caee1a41257a147340e40897d52baf8a77.tar.bz2
Changed debug to use param args instead of way too much + str()
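As an illustrative aside (not part of the commit), the pattern being swapped in looks like this, assuming a logger configured at DEBUG level:

    import logging
    logging.basicConfig( level=logging.DEBUG )

    chunk = "some chunk data"

    # before: eager concatenation, with str() needed around every non-string value
    logging.debug( "Begin save_chunk, length: " + str( len( chunk ) ) )

    # after: %-style parameter formatting, as used throughout this commit
    logging.debug( "Begin save_chunk, length: %d" % len( chunk ) )

    # logging can also defer formatting until the record is actually emitted
    # by passing the values as arguments instead of pre-formatting with %:
    # logging.debug( "Begin save_chunk, length: %d", len( chunk ) )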
Diffstat (limited to 'fusearchive.py')
-rwxr-xr-x  fusearchive.py  148
1 file changed, 72 insertions(+), 76 deletions(-)
diff --git a/fusearchive.py b/fusearchive.py
index b95ec65..26e4571 100755
--- a/fusearchive.py
+++ b/fusearchive.py
@@ -9,10 +9,11 @@
#
import os, sys, fcntl, fuse, sha, cPickle, gzip, errno
-import zipfile, stat, logging
+import zipfile, logging
from fuse import Fuse
+from binascii import hexlify
-import pdb
+#import pdb
if not hasattr(fuse, '__version__'):
raise RuntimeError, \
@@ -85,9 +86,9 @@ def _save_chunk_fs( chunk ):
if magic_profiling:
return( [ 0, 0 ] )
- logging.debug( "Begin save_chunk, length: " + str( len( chunk ) ) )
+ logging.debug( "Begin save_chunk, length: %d" % len( chunk ) )
if deep_debug:
- logging.debug( "Chunk: " + str( chunk ) )
+ logging.debug( "Chunk: %s" + hexlify( chunk ) )
# Save this hash string, similar to the backuppc algo
digest = sha.new( chunk ).digest()
@@ -141,16 +142,16 @@ def _save_chunk_fs( chunk ):
savechunk.close()
break
- logging.debug( "Got chunk slot: " + str( sub ) )
+ logging.debug( "Got chunk slot: %d" % sub )
return( [ digest, sub ] )
def _save_chunk_zip( chunk ):
if magic_profiling:
return( [ 0, 0 ] )
- logging.debug( "Begin save_chunk, length: " + str( len( chunk ) ) )
+ logging.debug( "Begin save_chunk, length: %d" % len( chunk ) )
if deep_debug:
- logging.debug( "Chunk: " + str( chunk ) )
+ logging.debug( "Chunk: %s" + hexlify( chunk ) )
# Save this hash string, similar to the backuppc algo
digest = sha.new( chunk ).digest()
@@ -201,7 +202,7 @@ def _save_chunk_zip( chunk ):
break
z.close()
- logging.debug( "Got chunk slot: " + str( sub ) )
+ logging.debug( "Got chunk slot: %d" % sub )
return( [ digest, sub ] )
# This will return a data block by key that was saved previously
@@ -217,7 +218,7 @@ def _load_chunk_fs( key ):
# Todo: make a digest -> path function to share with deflate
hexdigest = ''.join( [ "%02x" % ord( x ) for x in chars ] )
- logging.debug( "Hash is: " + str(hexdigest) + " sub " + str(seq) )
+ logging.debug( "Hash is: %s sub %d" % ( hexdigest, seq ) )
subparts = [ "%02x" % ord( x ) for x in chars[ :magic_depth ] ]
subpath = '/'.join( subparts )
logging.debug( "Subpath: " + subpath )
@@ -235,7 +236,7 @@ def _load_chunk_fs( key ):
raise IOError
if deep_debug:
- logging.debug( "Load-Chunk: " + str( chunk ) )
+ logging.debug( "Load-Chunk: %s" + hexlify( chunk ) )
return chunk
@@ -267,7 +268,7 @@ def _load_chunk_zip( key ):
raise IOError
if deep_debug:
- logging.debug( "Load-Chunk: " + str( chunk ) )
+ logging.debug( "Load-Chunk: %s" + hexlify( chunk ) )
z.close()
return chunk
@@ -291,7 +292,7 @@ class FuseArchiveSerializer:
@staticmethod
def dumpfh( fh, obj ):
- logging.debug( "Going to serialize " + str( obj ) + " to " + str( fh ) )
+ logging.debug( "Going to serialize %s to %s" % ( str( obj ), str( fh ) ) )
fh.truncate( 0 )
fh.seek( 0 )
f = gzip.GzipFile( None, "wb", gzip_compress_level, fh )
@@ -401,7 +402,7 @@ class FuseArchive(Fuse):
def truncate(self, path, len):
# Truncate using the ftruncate on the file
- logging.debug( "Using FuseArchiveFile to truncate " + path + " to " + str(len) )
+ logging.debug( "Using FuseArchiveFile to truncate %s to %d" % ( path, len) )
f = self.FuseArchiveFile( path, os.O_RDWR, 0 )
f.ftruncate(len)
f.release( 0 )
@@ -551,7 +552,7 @@ class FuseArchive(Fuse):
def _load_chunk( self, index ):
# If the current chunk is the same as the chunk we're loading
# just return
- logging.debug( "_load_chunk: " + str( index ) )
+ logging.debug( "_load_chunk: %d" % index )
if index == self.chunk_index:
logging.debug( "Load chunk is same as current chunk, all done" )
@@ -560,7 +561,7 @@ class FuseArchive(Fuse):
# Save this chunk if modified
self._save_chunk()
- logging.debug( "Loading chunk " + str(index) )
+ logging.debug( "Loading chunk %d" % index )
key = None
size = len( self.chunks )
@@ -574,13 +575,13 @@ class FuseArchive(Fuse):
logging.debug( "Found cached dirty page" )
self.chunk = key
else:
- logging.debug( "Index: " + str( key ) )
+ logging.debug( "Index: %s" % key )
self.chunk = load_chunk( key )
else:
logging.debug( "No chunk at this index, loading nothing" )
self.chunk = ''
- logging.debug( "Loaded chunk of length: " + str( len( self.chunk ) ) )
+ logging.debug( "Loaded chunk of length: %d" % len( self.chunk ) )
self.chunk_index = index
self.chunk_modified = False
@@ -588,7 +589,7 @@ class FuseArchive(Fuse):
# This simply puts the chunk data inside our current chunks at chunk_index
def _save_chunk(self):
if self.chunk_modified:
- logging.debug( "Saving chunk " + str(self.chunk_index) )
+ logging.debug( "Saving chunk %d" % self.chunk_index )
# Make sure we have room for this chunk
size = len( self.chunks )
@@ -599,8 +600,8 @@ class FuseArchive(Fuse):
if isinstance( self.chunks[ self.chunk_index ], list ) or \
len( self.chunks[ self.chunk_index ] ) == 0:
self.dirty_chunks += 1
- logging.debug( "Dirty chunks is now: " + str( self.dirty_chunks ) )
- logging.debug( "Dirty flush at: " + str( dirty_flush ) )
+ logging.debug( "Dirty chunks is now: %d" % self.dirty_chunks )
+ logging.debug( "Dirty flush at: %d" % dirty_flush )
# Save the dirty chunk temporarily in memory
self.chunks[ self.chunk_index ] = self.chunk
@@ -613,17 +614,15 @@ class FuseArchive(Fuse):
def _flush_chunks(self):
for index in range( len( self.chunks ) ):
if isinstance( self.chunks[ index ], str ):
- logging.debug( "Flushing chunk at " + str( index ) )
+ logging.debug( "Flushing chunk at %d" % index )
key = save_chunk( self.chunks[ index ] )
self.chunks[ index ] = key
- logging.debug( "Key was " + str( key ) )
+ logging.debug( "Key was %s" % key )
self.dirty_chunks = 0
def read(self, length, offset):
- logging.debug( "Reading from " + self.orig_path + " offset: " + str( offset )
- + ' (0x%x) ' % offset
- + " length: " + str( length )
- + ' (0x%x)' % length )
+ logging.debug( "Reading from %s offset: %d (0x%x) length: %d (0x%d)" %
+ ( self.orig_path, offset, offset, length, length ) )
data_read = 0
data = ''
@@ -633,7 +632,7 @@ class FuseArchive(Fuse):
# Keep reading chunks until we have at least this much data
while data_read < length and not is_eof:
- logging.debug( "Pulling chunk data: " + str( index ) )
+ logging.debug( "Pulling chunk data: %d" % index )
self._load_chunk( index )
if len(self.chunk):
chunk_remaining = len(self.chunk) - rest
@@ -642,11 +641,11 @@ class FuseArchive(Fuse):
if data_left < chunk_remaining:
to_read = data_left
- logging.debug( "chunk_remaining: " + str( chunk_remaining ) )
- logging.debug( "data_left: " + str( data_left ) )
- logging.debug( "data_read: " + str( data_read ) )
- logging.debug( "rest: " + str( rest ) )
- logging.debug( "Copying " + str(to_read) + " bytes" )
+ logging.debug( "chunk_remaining: %d" % chunk_remaining )
+ logging.debug( "data_left: %d" % data_left )
+ logging.debug( "data_read: %d" % data_read )
+ logging.debug( "rest: %d" % rest )
+ logging.debug( "Copying %d bytes" % to_read )
data += self.chunk[ rest:(rest+to_read) ]
data_read += to_read
@@ -656,38 +655,36 @@ class FuseArchive(Fuse):
logging.debug( "No more chunk data, bye" )
is_eof = True
- logging.debug( "Returning " + str( len( data ) ) + " bytes of data" )
- logging.debug( "Internal count was: " + str( data_read ) )
+ logging.debug( "Returning %d bytes of data" % len( data ) )
+ logging.debug( "Internal count was: %d" % data_read )
return data
def write(self, buf, offset):
if magic_profiling:
return len( buf )
- logging.debug( "Writing to " + self.orig_path + " offset: " + str( offset ) +
- ' (0x%x) ' % offset
- + ' length: ' + str( len( buf ) )
- + ' (0x%x)' % len( buf ) )
+ logging.debug( "Writing to %s offset: %d (0x%x) length: %d (0x%x)" %
+ ( self.orig_path, offset, offset, len( buf ), len( buf ) ) )
index = int( offset / self.chunk_size )
rest = offset % self.chunk_size
- logging.debug( "This chunk falls on index: " + str( index ) + " rest: " + str( rest ) )
- logging.debug( "We have " + str( len( self.chunks ) ) + " chunks" )
- logging.debug( "File size is: " + str( self.size ) )
+ logging.debug( "This chunk falls on index: %d rest: %d" % ( index, rest ) )
+ logging.debug( "We have %d chunks" % len( self.chunks ) )
+ logging.debug( "File size is: %d" % self.size )
# If index is higher than the number of blocks we current have it's a seek hole, so we need to extend our blocks out
# We know these need to essentially be zeroed up to this size since
if len( self.chunks ) - 1 < index:
- logging.debug( "Not enough chunks " + str( len( self.chunks ) ) + ", need " +
- str( index + 1 ) + ", extending" )
+ logging.debug( "Not enough chunks %d, need %d, extending" %
+ ( len( self.chunks ), index + 1 ) )
this_index = 0
while this_index < index:
self._load_chunk( this_index )
fill_null = self.chunk_size - len(self.chunk)
- logging.debug( "Filling this chunk with null, bytes: " + str( fill_null ) )
+ logging.debug( "Filling this chunk with null, bytes: %d" % fill_null )
self.chunk += "\0" * fill_null
- logging.debug( "Chunk is now: " + str( len( self.chunk) ) + " bytes" )
+ logging.debug( "Chunk is now: %d bytes" % len( self.chunk) )
self.chunk_modified = True
self._save_chunk()
this_index += 1
@@ -697,7 +694,7 @@ class FuseArchive(Fuse):
# Now check if this chunk needs to be extended
if len( self.chunk ) < rest:
fill_null = rest - len(self.chunk)
- logging.debug( "Filling final chunk with null, bytes: " + str( fill_null ) )
+ logging.debug( "Filling final chunk with null, bytes: %d" % fill_null )
self.chunk += "\0" * fill_null
self.chunk_modified = True
self._save_chunk()
@@ -705,31 +702,30 @@ class FuseArchive(Fuse):
buf_offset = 0
buf_len = len(buf)
- logging.debug( "Length: " + str( buf_len ) )
+ logging.debug( "Length: %d" % buf_len )
while( buf_offset < buf_len ):
- logging.debug( "Pulling in chunk for writing: " + str(index) )
+ logging.debug( "Pulling in chunk for writing: %d" % index )
self._load_chunk( index )
buf_remain = buf_len - buf_offset
chunk_remain = self.chunk_size - rest
- logging.debug( "buf_remain: " + str(buf_remain) )
- logging.debug( "chunk_remain: " + str(chunk_remain) )
+ logging.debug( "buf_remain: %d" % buf_remain )
+ logging.debug( "chunk_remain: %d" % chunk_remain )
if chunk_remain < buf_remain:
- logging.debug( "Writing " + str( chunk_remain ) + " bytes, buffer boundry" )
+ logging.debug( "Writing %d bytes, buffer boundry" % chunk_remain )
this_len = chunk_remain
else:
- logging.debug( "Writing final " + str( buf_remain ) + " bytes" )
+ logging.debug( "Writing final %d bytes" % buf_remain )
this_len = buf_remain
- logging.debug( "Bytes to copy: " + str(this_len) )
- logging.debug( " buf offset: " + str(buf_offset) )
- logging.debug( " chunk offset: " + str(rest) )
-
+ logging.debug( "Bytes to copy: %d" % this_len )
+ logging.debug( " buf offset: %d" % buf_offset )
+ logging.debug( " chunk offset: %d" % rest )
if deep_debug:
- logging.debug( "Pre-Buf: " + str(buf) )
- logging.debug( "Pre-Chunk: " + str(self.chunk) )
+ logging.debug( "Pre-Buf: %s" % hexlify(buf) )
+ logging.debug( "Pre-Chunk: %s" % hexlify(self.chunk) )
# Since python doesn't do in-place reassignment like you
# can with splice() we will reconstruct the data by joining
@@ -740,8 +736,8 @@ class FuseArchive(Fuse):
self.chunk[ (rest + this_len): ]
if deep_debug:
- logging.debug( "Post-Buf: " + str(buf) )
- logging.debug( "Post-Chunk: " + str(self.chunk) )
+ logging.debug( "Post-Buf: %s" % hexlify(buf) )
+ logging.debug( "Post-Chunk: %s" % hexlify(self.chunk) )
buf_offset += this_len
@@ -755,9 +751,9 @@ class FuseArchive(Fuse):
if offset + len(buf) > self.size:
self.size = offset + len(buf)
- logging.debug( "This chunk size is now: " + str(len(self.chunk)) )
- logging.debug( "File size is now: " + str(self.size) )
- logging.debug( "Num Chunks: " + str( len( self.chunks ) ) )
+ logging.debug( "This chunk size is now: %d" % len( self.chunk ) )
+ logging.debug( "File size is now: %d" % self.size )
+ logging.debug( "Num Chunks: %d" % len( self.chunks ) )
# Mark us in the dirty cache
dirty_cache[ self.orig_path ] = self
@@ -789,7 +785,7 @@ class FuseArchive(Fuse):
numchunks = len( self.chunks )
if numchunks > 0:
# Load the last chunk
- logging.debug( "We have " + str(numchunks) + " chunks, calculating size" )
+ logging.debug( "We have %d chunks, calculating size" % numchunks )
self._load_chunk( numchunks - 1 )
self.size = ( numchunks - 1 ) * self.chunk_size + \
len( self.chunk )
@@ -803,7 +799,7 @@ class FuseArchive(Fuse):
+ self.orig_path + " = " + str( self.size ) \
+ " doesn't match internal size " + str( save_size ) \
+ "\nProbably a bug in write or ftruncate!"
- logging.debug( "Size calculated is: " + str( self.size ) + ' (0x%x)' % self.size )
+ logging.debug( "Size calculated is: %d (0x%x)" % ( self.size, self.size ) )
FuseArchiveSerializer.dumpfh( self.file, {
'size': self.size,
@@ -855,11 +851,11 @@ class FuseArchive(Fuse):
curr_chunks = len( self.chunks )
need_chunks = ( length / self.chunk_size )
extra_bytes = length % self.chunk_size
- logging.debug( "Ftruncate - " + str(length) + ' (0x%x) ' % length )
- logging.debug( " - self.size: " + str( self.size ) )
- logging.debug( " - curr_chunks: " + str( curr_chunks ) )
- logging.debug( " - need_chunks: " + str( need_chunks ) )
- logging.debug( " - extra_bytes: " + str( extra_bytes ) )
+ logging.debug( "Ftruncate - %d (0x%x)" % ( length, length ) )
+ logging.debug( " - self.size: %d" % self.size )
+ logging.debug( " - curr_chunks: %d" % curr_chunks )
+ logging.debug( " - need_chunks: %d" % need_chunks )
+ logging.debug( " - extra_bytes: %d" % extra_bytes )
if extra_bytes:
logging.debug( "Need an extra chunk" )
@@ -872,18 +868,18 @@ class FuseArchive(Fuse):
self.chunks = []
self.chunk = ''
elif self.size <= length:
- logging.debug( "Need to pad out file, writing/seeking to " + str( length ) )
+ logging.debug( "Need to pad out file, writing/seeking to %d" % length )
# Just write out null bytes to the length requested, write will do this for us if we specify the offset
self.write( '', length )
else:
logging.debug( "Truncating chunks" )
while True:
- logging.debug( "Need chunks: " + str( need_chunks ) + " curr: " + str( curr_chunks ) )
+ logging.debug( "Need chunks: %d curr: %d" % ( need_chunks, curr_chunks ) )
if need_chunks == curr_chunks:
break
- logging.debug( "Deleting chunk " + str( self.chunk_index ) )
+ logging.debug( "Deleting chunk %d" % self.chunk_index )
self.chunks.pop()
curr_chunks = len( self.chunks )
@@ -891,13 +887,13 @@ class FuseArchive(Fuse):
# last chunk
if len( self.chunks ):
self._load_chunk( len( self.chunks ) - 1 )
- logging.debug( "Loaded final chunk, len: " + str( len( self.chunk ) ) )
+ logging.debug( "Loaded final chunk, len: %d" % len( self.chunk ) )
# Now truncate this item if needed
if len( self.chunk ) > extra_bytes:
- logging.debug( "Truncating final chunk to " + str( extra_bytes ) )
+ logging.debug( "Truncating final chunk to %d" % extra_bytes )
self.chunk = self.chunk[ :extra_bytes ]
- logging.debug( "Chunk is now: " + str( len( self.chunk) ) + " bytes" )
+ logging.debug( "Chunk is now: %d bytes" % len( self.chunk ) )
self.chunk_modified = True
self.modified = True
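
A usage note on the deep_debug changes above: hexlify renders raw chunk bytes as printable hex, where the old str() would dump binary straight into the log. A minimal standalone sketch of that pattern (the chunk value is hypothetical):

    import logging
    from binascii import hexlify

    logging.basicConfig( level=logging.DEBUG )
    deep_debug = True

    chunk = "\x00\x01binary\xff"
    if deep_debug:
        # hex dump instead of raw bytes in the log output
        logging.debug( "Chunk: %s" % hexlify( chunk ) )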