Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Convert tabs to spaces mass convert #896

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
60 changes: 30 additions & 30 deletions src/freenet/client/ArchiveContext.java
Original file line number Diff line number Diff line change
Expand Up @@ -22,36 +22,36 @@ public class ArchiveContext implements Serializable {

private static final long serialVersionUID = 1L;
private HashSet<FreenetURI> soFar;
final int maxArchiveLevels;
final long maxArchiveSize;
/**
 * Create a context that bounds archive processing.
 *
 * @param maxArchiveSize largest archive size, in bytes, that will be processed
 * @param max maximum number of nested archive levels permitted
 */
public ArchiveContext(long maxArchiveSize, int max) {
    this.maxArchiveSize = maxArchiveSize;
    this.maxArchiveLevels = max;
}
/**
 * No-argument constructor used only by serialization; both limits
 * default to zero until real state is restored.
 */
protected ArchiveContext() {
    this.maxArchiveLevels = 0;
    this.maxArchiveSize = 0;
}
/**
 * Check for a loop.
 *
 * The URI provided is expected to be a reasonably unique identifier for the archive.
 *
 * @param key identifier of the archive about to be descended into
 * @throws ArchiveFailureException if more than {@code maxArchiveLevels} archives
 *         have already been seen, or if this key was already visited (a loop)
 */
public synchronized void doLoopDetection(FreenetURI key) throws ArchiveFailureException {
    if(soFar == null) {
        soFar = new HashSet<FreenetURI>();
    }
    if(soFar.size() > maxArchiveLevels) {
        throw new ArchiveFailureException(ArchiveFailureException.TOO_MANY_LEVELS);
    }
    // HashSet.add() returns false when the element was already present:
    // seeing the same archive twice on one fetch path means a loop.
    if(!soFar.add(key)) {
        throw new ArchiveFailureException(ArchiveFailureException.ARCHIVE_LOOP_DETECTED);
    }
}
final int maxArchiveLevels;
final long maxArchiveSize;
/**
 * Create a context that bounds archive processing.
 *
 * @param maxArchiveSize largest archive size, in bytes, that will be processed
 * @param max maximum number of nested archive levels permitted
 */
public ArchiveContext(long maxArchiveSize, int max) {
    this.maxArchiveSize = maxArchiveSize;
    this.maxArchiveLevels = max;
}
/**
 * No-argument constructor used only by serialization; both limits
 * default to zero until real state is restored.
 */
protected ArchiveContext() {
    this.maxArchiveLevels = 0;
    this.maxArchiveSize = 0;
}
/**
 * Check for a loop.
 *
 * The URI provided is expected to be a reasonably unique identifier for the archive.
 *
 * @param key identifier of the archive about to be descended into
 * @throws ArchiveFailureException if more than {@code maxArchiveLevels} archives
 *         have already been seen, or if this key was already visited (a loop)
 */
public synchronized void doLoopDetection(FreenetURI key) throws ArchiveFailureException {
    if(soFar == null) {
        soFar = new HashSet<FreenetURI>();
    }
    if(soFar.size() > maxArchiveLevels) {
        throw new ArchiveFailureException(ArchiveFailureException.TOO_MANY_LEVELS);
    }
    // HashSet.add() returns false when the element was already present:
    // seeing the same archive twice on one fetch path means a loop.
    if(!soFar.add(key)) {
        throw new ArchiveFailureException(ArchiveFailureException.ARCHIVE_LOOP_DETECTED);
    }
}

public synchronized void clear() {
soFar = null;
Expand Down
24 changes: 12 additions & 12 deletions src/freenet/client/ArchiveExtractCallback.java
Original file line number Diff line number Diff line change
Expand Up @@ -9,17 +9,17 @@
* or isn't in it. */
public interface ArchiveExtractCallback extends Serializable {

    /**
     * Got the data.
     * Note that the bucket will be persistent if the caller asked for an off-thread extraction.
     *
     * @param data the extracted element's data
     * @param context the client context
     */
    public void gotBucket(Bucket data, ClientContext context);

    /** Not in the archive */
    public void notInArchive(ClientContext context);

    /** Failed: restart */
    public void onFailed(ArchiveRestartException e, ClientContext context);

    /** Failed for some other reason */
    public void onFailed(ArchiveFailureException e, ClientContext context);

}
22 changes: 11 additions & 11 deletions src/freenet/client/ArchiveFailureException.java
Original file line number Diff line number Diff line change
Expand Up @@ -9,18 +9,18 @@
*/
public class ArchiveFailureException extends Exception {

    private static final long serialVersionUID = -5915105120222575469L;

    /** Message used when the nested-archive depth limit is exceeded. */
    public static final String TOO_MANY_LEVELS = "Too many archive levels";
    /** Message used when the same archive is encountered twice on one fetch path. */
    public static final String ARCHIVE_LOOP_DETECTED = "Archive loop detected";

    /**
     * Create an exception with a descriptive message and no cause.
     *
     * @param message human-readable description of the failure
     */
    public ArchiveFailureException(String message) {
        super(message);
    }

    /**
     * Create an exception with a descriptive message and an underlying cause.
     *
     * @param message human-readable description of the failure
     * @param e the underlying exception that triggered this failure
     */
    public ArchiveFailureException(String message, Exception e) {
        super(message);
        initCause(e);
    }

}
100 changes: 50 additions & 50 deletions src/freenet/client/ArchiveHandler.java
Original file line number Diff line number Diff line change
Expand Up @@ -18,58 +18,58 @@
*/
public interface ArchiveHandler {

    /**
     * Get the metadata for this ZIP manifest, as a Bucket.
     * THE RETURNED BUCKET WILL ALWAYS BE NON-PERSISTENT.
     * @return The metadata as a Bucket, or null.
     * @param manager The ArchiveManager.
     * @throws FetchException If the container could not be fetched.
     * @throws MetadataParseException If there was an error parsing intermediary metadata.
     */
    public abstract Bucket getMetadata(ArchiveContext archiveContext,
            ArchiveManager manager)
            throws ArchiveFailureException, ArchiveRestartException,
            MetadataParseException, FetchException;

    /**
     * Get a file from this ZIP manifest, as a Bucket.
     * If possible, read it from cache. If not, return null.
     * THE RETURNED BUCKET WILL ALWAYS BE NON-PERSISTENT.
     * @param internalName The name of the element wanted within the archive.
     * @param manager The ArchiveManager.
     * @throws FetchException
     * @throws MetadataParseException
     */
    public abstract Bucket get(String internalName,
            ArchiveContext archiveContext, ArchiveManager manager)
            throws ArchiveFailureException, ArchiveRestartException,
            MetadataParseException, FetchException;

    /**
     * Get the archive type.
     */
    public abstract ARCHIVE_TYPE getArchiveType();

    /**
     * Get the key.
     */
    public abstract FreenetURI getKey();

    /**
     * Unpack a fetched archive to cache, and call the callback if there is one.
     * @param bucket The downloaded data for the archive.
     * @param actx The ArchiveContext.
     * @param element The single element that the caller is especially interested in.
     * @param callback Callback to be notified whether the content is available, and if so, fed the data.
     * @param manager The ArchiveManager.
     * @throws ArchiveFailureException
     * @throws ArchiveRestartException
     */
    public abstract void extractToCache(Bucket bucket, ArchiveContext actx, String element, ArchiveExtractCallback callback, ArchiveManager manager,
            ClientContext context) throws ArchiveFailureException, ArchiveRestartException;

    /**
     * @return a copy of this handler that can be used independently.
     */
    public abstract ArchiveHandler cloneHandler();
}
142 changes: 71 additions & 71 deletions src/freenet/client/ArchiveHandlerImpl.java
Original file line number Diff line number Diff line change
Expand Up @@ -14,76 +14,76 @@ class ArchiveHandlerImpl implements ArchiveHandler, Serializable {
private static final long serialVersionUID = 1L;
private static volatile boolean logMINOR;

static {
// Registers this class with the Logger so the static logMINOR flag is
// kept in sync with the current log threshold.
Logger.registerClass(ArchiveHandlerImpl.class);
}

private final FreenetURI key;
private boolean forceRefetchArchive;
ARCHIVE_TYPE archiveType;
COMPRESSOR_TYPE compressorType;

/**
 * @param key the URI identifying this archive
 * @param archiveType the container format of the archive
 * @param ctype the compressor type recorded for the archive
 * @param forceRefetchArchive if true, ignore cached data until a fresh extraction happens
 */
ArchiveHandlerImpl(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, boolean forceRefetchArchive) {
    this.forceRefetchArchive = forceRefetchArchive;
    this.compressorType = ctype;
    this.archiveType = archiveType;
    this.key = key;
}

/**
 * Look up an element of this archive in the local cache only.
 * Returns null both when a refetch has been forced and when the
 * element is simply not cached; no network fetch happens here.
 *
 * @param internalName name of the element inside the archive
 * @return the cached data, or null if none may be used
 */
@Override
public Bucket get(String internalName, ArchiveContext archiveContext,
        ArchiveManager manager)
        throws ArchiveFailureException, ArchiveRestartException,
        MetadataParseException, FetchException {

    // A forced refetch means any cached copy must be ignored.
    if(forceRefetchArchive) return null;

    if(logMINOR)
        Logger.minor(this, "Checking cache: "+key+ ' ' +internalName);

    Bucket cached = manager.getCached(key, internalName);
    return cached; // null when the element is not in the cache
}

/**
 * Fetch this archive's metadata document from the cache.
 * Delegates to {@link #get} with the reserved ".metadata" element name.
 */
@Override
public Bucket getMetadata(ArchiveContext archiveContext,
        ArchiveManager manager) throws ArchiveFailureException,
        ArchiveRestartException, MetadataParseException, FetchException {
    final String metadataName = ".metadata";
    return get(metadataName, archiveContext, manager);
}

/**
 * Unpack downloaded archive data into the cache via the ArchiveManager,
 * notifying the callback about the requested element.
 */
@Override
public void extractToCache(Bucket bucket, ArchiveContext actx,
        String element, ArchiveExtractCallback callback,
        ArchiveManager manager, ClientContext context) throws ArchiveFailureException,
        ArchiveRestartException {
    // Fresh data has now been fetched, so cached results are acceptable again.
    forceRefetchArchive = false;
    ArchiveStoreContext storeContext = manager.makeContext(key, archiveType, compressorType, false);
    manager.extractToCache(key, archiveType, compressorType, bucket, actx, storeContext, element, callback, context);
}

/** @return the container format of this archive. */
@Override
public ARCHIVE_TYPE getArchiveType() {
    return this.archiveType;
}

/** @return the compressor type recorded for this archive. */
public COMPRESSOR_TYPE getCompressorType() {
    return this.compressorType;
}

/** @return the URI this handler was constructed for. */
@Override
public FreenetURI getKey() {
    return this.key;
}

/** @return a new handler for the same key/type/compressor, preserving the refetch flag. */
@Override
public ArchiveHandler cloneHandler() {
    return new ArchiveHandlerImpl(this.key, this.archiveType, this.compressorType, this.forceRefetchArchive);
}
static {
// Registers this class with the Logger so the static logMINOR flag is
// kept in sync with the current log threshold.
Logger.registerClass(ArchiveHandlerImpl.class);
}

private final FreenetURI key;
private boolean forceRefetchArchive;
ARCHIVE_TYPE archiveType;
COMPRESSOR_TYPE compressorType;

/**
 * @param key the URI identifying this archive
 * @param archiveType the container format of the archive
 * @param ctype the compressor type recorded for the archive
 * @param forceRefetchArchive if true, ignore cached data until a fresh extraction happens
 */
ArchiveHandlerImpl(FreenetURI key, ARCHIVE_TYPE archiveType, COMPRESSOR_TYPE ctype, boolean forceRefetchArchive) {
    this.forceRefetchArchive = forceRefetchArchive;
    this.compressorType = ctype;
    this.archiveType = archiveType;
    this.key = key;
}

/**
 * Look up an element of this archive in the local cache only.
 * Returns null both when a refetch has been forced and when the
 * element is simply not cached; no network fetch happens here.
 *
 * @param internalName name of the element inside the archive
 * @return the cached data, or null if none may be used
 */
@Override
public Bucket get(String internalName, ArchiveContext archiveContext,
        ArchiveManager manager)
        throws ArchiveFailureException, ArchiveRestartException,
        MetadataParseException, FetchException {

    // A forced refetch means any cached copy must be ignored.
    if(forceRefetchArchive) return null;

    if(logMINOR)
        Logger.minor(this, "Checking cache: "+key+ ' ' +internalName);

    Bucket cached = manager.getCached(key, internalName);
    return cached; // null when the element is not in the cache
}

/**
 * Fetch this archive's metadata document from the cache.
 * Delegates to {@link #get} with the reserved ".metadata" element name.
 */
@Override
public Bucket getMetadata(ArchiveContext archiveContext,
        ArchiveManager manager) throws ArchiveFailureException,
        ArchiveRestartException, MetadataParseException, FetchException {
    final String metadataName = ".metadata";
    return get(metadataName, archiveContext, manager);
}

/**
 * Unpack downloaded archive data into the cache via the ArchiveManager,
 * notifying the callback about the requested element.
 */
@Override
public void extractToCache(Bucket bucket, ArchiveContext actx,
        String element, ArchiveExtractCallback callback,
        ArchiveManager manager, ClientContext context) throws ArchiveFailureException,
        ArchiveRestartException {
    // Fresh data has now been fetched, so cached results are acceptable again.
    forceRefetchArchive = false;
    ArchiveStoreContext storeContext = manager.makeContext(key, archiveType, compressorType, false);
    manager.extractToCache(key, archiveType, compressorType, bucket, actx, storeContext, element, callback, context);
}

/** @return the container format of this archive. */
@Override
public ARCHIVE_TYPE getArchiveType() {
    return this.archiveType;
}

/** @return the compressor type recorded for this archive. */
public COMPRESSOR_TYPE getCompressorType() {
    return this.compressorType;
}

/** @return the URI this handler was constructed for. */
@Override
public FreenetURI getKey() {
    return this.key;
}

/** @return a new handler for the same key/type/compressor, preserving the refetch flag. */
@Override
public ArchiveHandler cloneHandler() {
    return new ArchiveHandlerImpl(this.key, this.archiveType, this.compressorType, this.forceRefetchArchive);
}

}
Loading
Loading