@@ -390,17 +390,17 @@ private void startPersistenceRetriever(int[] bucketSizes, long capacity) {
       try {
         retrieveFromFile(bucketSizes);
         LOG.info("Persistent bucket cache recovery from {} is complete.", persistencePath);
-      } catch (IOException ioex) {
-        LOG.error("Can't restore from file[{}] because of ", persistencePath, ioex);
+      } catch (Throwable ex) {
+        LOG.error("Can't restore from file[{}] because of ", persistencePath, ex);
         backingMap.clear();
         fullyCachedFiles.clear();
         backingMapValidated.set(true);
+        regionCachedSize.clear();
         try {
           bucketAllocator = new BucketAllocator(capacity, bucketSizes);
-        } catch (BucketAllocatorException ex) {
-          LOG.error("Exception during Bucket Allocation", ex);
+        } catch (BucketAllocatorException allocatorException) {
+          LOG.error("Exception during Bucket Allocation", allocatorException);
         }
-        regionCachedSize.clear();
       } finally {
         this.cacheState = CacheState.ENABLED;
         startWriterThreads();
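
The substance of this hunk: recovery failures are now caught as Throwable rather than IOException, so runtime exceptions thrown while reading a corrupt persistence file also fall through to the clean-slate path, and regionCachedSize is cleared together with the other maps instead of only after the allocator rebuild (which can itself fail). A minimal standalone sketch of that pattern, with hypothetical names (recoverFromDisk, reinitializeAllocator) standing in for the BucketCache internals:

  import java.util.HashMap;
  import java.util.Map;

  public class RecoveryFallback {
    private final Map<String, Long> backingMap = new HashMap<>();
    private final Map<String, Long> regionCachedSize = new HashMap<>();

    // Hypothetical recovery entry point mirroring the patched flow:
    // catch Throwable (not just IOException) so that *any* failure while
    // reading persisted state leads to a consistent empty-cache restart.
    void startRecovery() {
      try {
        recoverFromDisk(); // may throw anything, checked or unchecked
        System.out.println("recovery complete");
      } catch (Throwable t) {
        System.out.println("recovery failed, starting empty: " + t);
        // Clear ALL derived state together, independently of any follow-up
        // re-initialization that may itself fail.
        backingMap.clear();
        regionCachedSize.clear();
        try {
          reinitializeAllocator();
        } catch (IllegalStateException allocatorException) {
          System.out.println("allocator re-init failed: " + allocatorException);
        }
      } finally {
        // The cache is enabled either way; it just starts cold on failure.
        System.out.println("cache enabled");
      }
    }

    void recoverFromDisk() { throw new RuntimeException("corrupt persistence file"); }
    void reinitializeAllocator() {}

    public static void main(String[] args) { new RecoveryFallback().startRecovery(); }
  }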
@@ -951,7 +951,8 @@ public void logStats() {
         : (StringUtils.formatPercent(cacheStats.getHitCachingRatio(), 2) + ", "))
         + "evictions=" + cacheStats.getEvictionCount() + ", " + "evicted="
         + cacheStats.getEvictedCount() + ", " + "evictedPerRun=" + cacheStats.evictedPerEviction()
-        + ", " + "allocationFailCount=" + cacheStats.getAllocationFailCount());
+        + ", " + "allocationFailCount=" + cacheStats.getAllocationFailCount() + ", blocksCount="
+        + backingMap.size());
     cacheStats.reset();
 
     bucketAllocator.logDebugStatistics();
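
With this change the periodic stats line also reports how many blocks the backing map currently tracks, which makes it easy to correlate evictions against live cache population. An illustrative fragment of the tail of that line, with made-up values:

  ... evictions=12, evicted=3456, evictedPerRun=288.0, allocationFailCount=0, blocksCount=10240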
@@ -1496,7 +1497,7 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException {
     } else if (Arrays.equals(pbuf, BucketProtoUtils.PB_MAGIC_V2)) {
       // The new persistence format of chunked persistence.
       LOG.info("Reading new chunked format of persistence.");
-      retrieveChunkedBackingMap(in, bucketSizes);
+      retrieveChunkedBackingMap(in);
     } else {
       // In 3.0 we have enough flexibility to dump the old cache data.
       // TODO: In 2.x line, this might need to be filled in to support reading the old format
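
For context, this dispatch hinges on a magic-bytes prefix at the start of the persistence file; only the chunked (PB_MAGIC_V2) branch changes, since the reader no longer needs the bucket sizes to walk the chunks. A minimal sketch of this kind of version dispatch; the magic values and method bodies here are illustrative, not HBase's actual constants:

  import java.io.ByteArrayInputStream;
  import java.io.IOException;
  import java.io.InputStream;
  import java.util.Arrays;

  public class MagicDispatch {
    // Hypothetical stand-ins for BucketProtoUtils.PB_MAGIC / PB_MAGIC_V2.
    static final byte[] MAGIC_V1 = {'P', 'B', 'U', 'F'};
    static final byte[] MAGIC_V2 = {'P', 'B', 'U', '2'};

    static void retrieve(InputStream in) throws IOException {
      byte[] pbuf = new byte[4];
      if (in.read(pbuf) != pbuf.length) {
        throw new IOException("Truncated persistence file: no magic header");
      }
      if (Arrays.equals(pbuf, MAGIC_V1)) {
        System.out.println("single-blob format");      // legacy path
      } else if (Arrays.equals(pbuf, MAGIC_V2)) {
        System.out.println("chunked format");          // new path: reader needs only the stream
      } else {
        throw new IOException("Unknown magic: " + Arrays.toString(pbuf));
      }
    }

    public static void main(String[] args) throws IOException {
      retrieve(new ByteArrayInputStream(new byte[] {'P', 'B', 'U', '2', 0, 1}));
    }
  }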
@@ -1626,52 +1627,33 @@ private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOException {
   }
 
   private void persistChunkedBackingMap(FileOutputStream fos) throws IOException {
-    long numChunks = backingMap.size() / persistenceChunkSize;
-    if (backingMap.size() % persistenceChunkSize != 0) {
-      numChunks += 1;
-    }
-
     LOG.debug(
       "persistToFile: before persisting backing map size: {}, "
-        + "fullycachedFiles size: {}, chunkSize: {}, numberofChunks: {}",
-      backingMap.size(), fullyCachedFiles.size(), persistenceChunkSize, numChunks);
+        + "fullycachedFiles size: {}, chunkSize: {}",
+      backingMap.size(), fullyCachedFiles.size(), persistenceChunkSize);
 
-    BucketProtoUtils.serializeAsPB(this, fos, persistenceChunkSize, numChunks);
+    BucketProtoUtils.serializeAsPB(this, fos, persistenceChunkSize);
 
     LOG.debug(
-      "persistToFile: after persisting backing map size: {}, "
-        + "fullycachedFiles size: {}, numChunksPersisteed: {}",
-      backingMap.size(), fullyCachedFiles.size(), numChunks);
+      "persistToFile: after persisting backing map size: {}, " + "fullycachedFiles size: {}",
+      backingMap.size(), fullyCachedFiles.size());
   }
 
-  private void retrieveChunkedBackingMap(FileInputStream in, int[] bucketSizes) throws IOException {
-    byte[] bytes = new byte[Long.BYTES];
-    int readSize = in.read(bytes);
-    if (readSize != Long.BYTES) {
-      throw new IOException("Invalid size of chunk-size read from persistence: " + readSize);
-    }
-    long batchSize = Bytes.toLong(bytes, 0);
-
-    readSize = in.read(bytes);
-    if (readSize != Long.BYTES) {
-      throw new IOException("Invalid size for number of chunks read from persistence: " + readSize);
-    }
-    long numChunks = Bytes.toLong(bytes, 0);
-
-    LOG.info("Number of chunks: {}, chunk size: {}", numChunks, batchSize);
+  private void retrieveChunkedBackingMap(FileInputStream in) throws IOException {
 
     // Read the first chunk that has all the details.
     BucketCacheProtos.BucketCacheEntry firstChunk =
       BucketCacheProtos.BucketCacheEntry.parseDelimitedFrom(in);
     parseFirstChunk(firstChunk);
 
     // Subsequent chunks have the backingMap entries.
-    for (int i = 1; i < numChunks; i++) {
-      LOG.info("Reading chunk no: {}", i + 1);
+    int numChunks = 0;
+    while (in.available() > 0) {
       parseChunkPB(BucketCacheProtos.BackingMap.parseDelimitedFrom(in),
         firstChunk.getDeserializersMap());
-      LOG.info("Retrieved chunk: {}", i + 1);
+      numChunks++;
     }
+    LOG.info("Retrieved {} chunks with blockCount = {}.", numChunks, backingMap.size());
     verifyFileIntegrity(firstChunk);
     verifyCapacityAndClasses(firstChunk.getCacheCapacity(), firstChunk.getIoClass(),
       firstChunk.getMapClass());
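
The essence of the new retrieval path is that the chunk count is no longer persisted or trusted: each chunk is a self-delimiting protobuf message, so the reader simply keeps calling parseDelimitedFrom until the stream is exhausted, counting chunks as it goes. A minimal sketch of the same length-prefixed pattern using plain DataStreams instead of protobuf (helper names are illustrative):

  import java.io.*;
  import java.nio.charset.StandardCharsets;

  public class ChunkedStream {
    // Write each chunk prefixed by its own length; no global chunk count
    // is stored, so the writer never has to precompute it.
    static void writeChunk(DataOutputStream out, byte[] chunk) throws IOException {
      out.writeInt(chunk.length);
      out.write(chunk);
    }

    public static void main(String[] args) throws IOException {
      ByteArrayOutputStream buf = new ByteArrayOutputStream();
      try (DataOutputStream out = new DataOutputStream(buf)) {
        for (String s : new String[] {"chunk-a", "chunk-b", "chunk-c"}) {
          writeChunk(out, s.getBytes(StandardCharsets.UTF_8));
        }
      }

      // Reader: loop until the stream runs dry, mirroring
      // `while (in.available() > 0) { parseDelimitedFrom(in); numChunks++; }`.
      int numChunks = 0;
      try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
        while (in.available() > 0) {
          byte[] chunk = new byte[in.readInt()];
          in.readFully(chunk);
          numChunks++;
        }
      }
      System.out.println("Retrieved " + numChunks + " chunks.");
    }
  }

Note the reliance on available() as the loop condition: for a FileInputStream over a regular file it reports the remaining bytes, so available() == 0 is a trustworthy end-of-file signal here. For arbitrary streams that guarantee does not hold, and a sentinel chunk or an EOF-tolerant parse would be the safer termination check.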