 #include <string.h>
 
 #include "cache.h"
+#include "mpool.h"
 
 #define MIN(a, b) ((a < b) ? a : b)
 #define GOLDEN_RATIO_32 0x61C88647
 #define HASH(val) \
     (((val) * (GOLDEN_RATIO_32)) >> (32 - (cache_size_bits))) & (cache_size - 1)
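
/* A side note on the hash above: 0x61C88647 is the 32-bit golden-ratio
 * multiplier (the same GOLDEN_RATIO_32 used by Linux's hash_32()), so
 * multiplying by it and keeping the top bits spreads consecutive keys across
 * buckets. A minimal standalone sketch of the scheme, with illustrative names:
 *
 *     static inline uint32_t fib_hash32(uint32_t key, uint32_t bits)
 *     {
 *         return (key * 0x61C88647u) >> (32 - bits);  // keep the top bits
 *     }
 *
 * The trailing "& (cache_size - 1)" in HASH() masks bits the shift has already
 * isolated, so it appears redundant, though harmless. */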
+/* If the frequency of a block exceeds THRESHOLD, it will be translated
+ * into machine code. */
+#define THRESHOLD 1000
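
/* A sketch of the bookkeeping this introduces (illustrative, not part of the
 * patch): lists[i] holds blocks that have been looked up i + 1 times, so an
 * eviction routine can scan the buckets from coldest to hottest, e.g.:
 *
 *     for (int i = 0; i < THRESHOLD; i++)   // lowest frequency first
 *         if (!list_empty(cache->lists[i]))
 *             return list_last_entry(cache->lists[i], lfu_entry_t, list);
 */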
 
 static uint32_t cache_size, cache_size_bits;
+static struct mpool *cache_mp;
 
+#if RV32_HAS(ARC)
 /*
  * Adaptive Replacement Cache (ARC) improves the fundamental LRU strategy
  * by dividing the cache into two lists, T1 and T2. list T1 is for LRU
@@ -30,14 +36,15 @@ static uint32_t cache_size, cache_size_bits;
  * we increase T1's size while decreasing T2. But if the cache hit occurs in
  * B2, we would increase the size of T2 and decrease the size of T1.
  */
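
/* The adjustment just described is ARC's "learning rule". A hedged sketch of
 * the idea, where p is the target size of T1 and c the total capacity (p seems
 * to correspond to cache->lru_capacity in this file):
 *
 *     on a ghost hit in B1: p = MIN(p + 1, c);        // recency wins, grow T1
 *     on a ghost hit in B2: p = (p > 0) ? p - 1 : 0;  // frequency wins, grow T2
 */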
+
 typedef enum {
     LRU_list,
     LFU_list,
     LRU_ghost_list,
     LFU_ghost_list,
     N_CACHE_LIST_TYPES
 } cache_list_t;
-
+#endif
 struct list_head {
     struct list_head *prev, *next;
 };
@@ -50,6 +57,7 @@ struct hlist_node {
     struct hlist_node *next, **pprev;
 };
 
+#if RV32_HAS(ARC)
 /*
  * list maintains the four cache lists T1, T2, B1, and B2.
  * ht_list maintains the hash table and improves the performance of cache searching.
@@ -61,17 +69,31 @@ typedef struct {
     struct list_head list;
     struct hlist_node ht_list;
 } arc_entry_t;
+#else
+typedef struct {
+    void *value;
+    uint32_t key;
+    uint32_t frequency;
+    struct list_head list;
+    struct hlist_node ht_list;
+} lfu_entry_t;
+#endif
 
 typedef struct {
     struct hlist_head *ht_list_head;
 } hashtable_t;
 
 typedef struct cache {
+#if RV32_HAS(ARC)
     struct list_head *lists[N_CACHE_LIST_TYPES];
     uint32_t list_size[N_CACHE_LIST_TYPES];
+    uint32_t lru_capacity;
+#else
+    struct list_head *lists[THRESHOLD];
+    uint32_t list_size;
+#endif
     hashtable_t *map;
     uint32_t capacity;
-    uint32_t lru_capacity;
 } cache_t;
 
 static inline void INIT_LIST_HEAD(struct list_head *head)
@@ -80,6 +102,13 @@ static inline void INIT_LIST_HEAD(struct list_head *head)
     head->prev = head;
 }
 
+#if !RV32_HAS(ARC)
+static inline int list_empty(const struct list_head *head)
+{
+    return (head->next == head);
+}
+#endif
+
 static inline void list_add(struct list_head *node, struct list_head *head)
 {
     struct list_head *next = head->next;
@@ -107,6 +136,9 @@ static inline void list_del_init(struct list_head *node)
 
 #define list_entry(node, type, member) container_of(node, type, member)
 
+#define list_first_entry(head, type, member) \
+    list_entry((head)->next, type, member)
+
 #define list_last_entry(head, type, member) \
     list_entry((head)->prev, type, member)
 
@@ -194,14 +226,15 @@ static inline void hlist_del_init(struct hlist_node *n)
          pos = hlist_entry_safe((pos)->member.next, type, member))
 #endif
 
+
 cache_t *cache_create(int size_bits)
 {
     cache_t *cache = malloc(sizeof(cache_t));
     if (!cache)
         return NULL;
     cache_size_bits = size_bits;
     cache_size = 1 << size_bits;
-
+#if RV32_HAS(ARC)
     for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
         cache->lists[i] = malloc(sizeof(struct list_head));
         INIT_LIST_HEAD(cache->lists[i]);
@@ -224,12 +257,41 @@ cache_t *cache_create(int size_bits)
     for (uint32_t i = 0; i < cache_size; i++) {
         INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
     }
+    cache->lru_capacity = cache_size / 2;
+    cache_mp =
+        mpool_create(cache_size * 2 * sizeof(arc_entry_t), sizeof(arc_entry_t));
+#else
+    for (int i = 0; i < THRESHOLD; i++) {
+        cache->lists[i] = malloc(sizeof(struct list_head));
+        INIT_LIST_HEAD(cache->lists[i]);
+    }
 
+    cache->map = malloc(sizeof(hashtable_t));
+    if (!cache->map) {
+        /* lists[] is an array member, so free each list head individually */
+        for (int i = 0; i < THRESHOLD; i++)
+            free(cache->lists[i]);
+        free(cache);
+        return NULL;
+    }
+    cache->map->ht_list_head = malloc(cache_size * sizeof(struct hlist_head));
+    if (!cache->map->ht_list_head) {
+        free(cache->map);
+        for (int i = 0; i < THRESHOLD; i++)
+            free(cache->lists[i]);
+        free(cache);
+        return NULL;
+    }
+    for (uint32_t i = 0; i < cache_size; i++) {
+        INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
+    }
+    cache->list_size = 0;
+    cache_mp =
+        mpool_create(cache_size * sizeof(lfu_entry_t), sizeof(lfu_entry_t));
+#endif
     cache->capacity = cache_size;
-    cache->lru_capacity = cache_size / 2;
     return cache;
 }
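
/* A minimal usage sketch under assumed semantics (block_t and block_free are
 * hypothetical; cache_put() hands back the value it evicted, if any):
 *
 *     cache_t *c = cache_create(10);            // 1 << 10 hash buckets
 *     block_t *victim = cache_put(c, pc, blk);  // insert; may evict
 *     if (victim)
 *         block_free(victim);                   // caller owns the evicted value
 *     block_t *hit = cache_get(c, pc);          // NULL on a miss
 *     cache_free(c, (void (*)(void *)) block_free);
 */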
 
+
+#if RV32_HAS(ARC)
 /* Rules of ARC
  * 1. size of LRU_list + size of LFU_list <= c
  * 2. size of LRU_list + size of LRU_ghost_list <= c
@@ -273,12 +335,14 @@ static inline void move_to_mru(cache_t *cache,
     list_del_init(&entry->list);
     list_add(&entry->list, cache->lists[type]);
 }
+#endif
 
 void *cache_get(cache_t *cache, uint32_t key)
 {
     if (!cache->capacity || hlist_empty(&cache->map->ht_list_head[HASH(key)]))
         return NULL;
 
+#if RV32_HAS(ARC)
     arc_entry_t *entry = NULL;
 #ifdef __HAVE_TYPEOF
     hlist_for_each_entry(entry, &cache->map->ht_list_head[HASH(key)], ht_list)
@@ -323,13 +387,35 @@ void *cache_get(cache_t *cache, uint32_t key)
     }
 
     CACHE_ASSERT(cache);
+#else
+    lfu_entry_t *entry = NULL;
+#ifdef __HAVE_TYPEOF
+    hlist_for_each_entry(entry, &cache->map->ht_list_head[HASH(key)], ht_list)
+#else
+    hlist_for_each_entry(entry, &cache->map->ht_list_head[HASH(key)], ht_list,
+                         lfu_entry_t)
+#endif
+    {
+        if (entry->key == key)
+            break;
+    }
+    if (!entry || entry->key != key)
+        return NULL;
+
+    /* Blocks whose frequency exceeds THRESHOLD are translated into machine
+     * code, so stop promoting them past the last list. */
+    if (entry->frequency < THRESHOLD) {
+        list_del_init(&entry->list);
+        list_add(&entry->list, cache->lists[entry->frequency++]);
+    }
+#endif
     /* cache hit: return the cached value */
     return entry->value;
 }
 
 void *cache_put(cache_t *cache, uint32_t key, void *value)
 {
     void *delete_value = NULL;
+#if RV32_HAS(ARC)
     assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <=
            cache->capacity);
     /* Before adding a new element to the cache, we should check the status
@@ -343,7 +429,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
         list_del_init(&delete_target->list);
         hlist_del_init(&delete_target->ht_list);
         delete_value = delete_target->value;
-        free(delete_target);
+        mpool_free(cache_mp, delete_target);
         cache->list_size[LRU_ghost_list]--;
         if (cache->list_size[LRU_list] &&
             cache->list_size[LRU_list] >= cache->lru_capacity)
@@ -357,7 +443,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
             list_del_init(&delete_target->list);
             hlist_del_init(&delete_target->ht_list);
             delete_value = delete_target->value;
-            free(delete_target);
+            mpool_free(cache_mp, delete_target);
             cache->list_size[LRU_list]--;
         }
     } else {
@@ -372,12 +458,12 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
             list_del_init(&delete_target->list);
             hlist_del_init(&delete_target->ht_list);
             delete_value = delete_target->value;
-            free(delete_target);
+            mpool_free(cache_mp, delete_target);
             cache->list_size[LFU_ghost_list]--;
         }
         REPLACE_LIST(>, >=)
     }
-    arc_entry_t *new_entry = malloc(sizeof(arc_entry_t));
+    arc_entry_t *new_entry = mpool_alloc(cache_mp);
     new_entry->key = key;
     new_entry->value = value;
     /* check if the entire cache has become LFU */
@@ -393,21 +479,63 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
     hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
 
     CACHE_ASSERT(cache);
+#else
+    assert(cache->list_size <= cache->capacity);
+    /* Before adding a new element to the cache, we should check the status
+     * of the cache.
+     */
+    if (cache->list_size == cache->capacity) {
+        for (int i = 0; i < THRESHOLD; i++) {
+            if (!list_empty(cache->lists[i])) {
+                lfu_entry_t *delete_target =
+                    list_last_entry(cache->lists[i], lfu_entry_t, list);
+                list_del_init(&delete_target->list);
+                hlist_del_init(&delete_target->ht_list);
+                delete_value = delete_target->value;
+                cache->list_size--;
+                mpool_free(cache_mp, delete_target);
+                break;
+            }
+        }
+    }
+    lfu_entry_t *new_entry = mpool_alloc(cache_mp);
+    new_entry->key = key;
+    new_entry->value = value;
+    new_entry->frequency = 0;
+    list_add(&new_entry->list, cache->lists[new_entry->frequency++]);
+    cache->list_size++;
+    hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
+    assert(cache->list_size <= cache->capacity);
+#endif
     return delete_value;
 }
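
/* Note on ownership: cache_put() returns the evicted value (or NULL) and the
 * cache drops its reference to it. A hedged example of the intended pattern,
 * with a hypothetical free_block():
 *
 *     void *old = cache_put(c, key, new_block);
 *     if (old)
 *         free_block(old);   // the cache no longer tracks this block
 */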
 
 void cache_free(cache_t *cache, void (*callback)(void *))
 {
+#if RV32_HAS(ARC)
     for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
         arc_entry_t *entry, *safe;
 #ifdef __HAVE_TYPEOF
         list_for_each_entry_safe(entry, safe, cache->lists[i], list)
 #else
         list_for_each_entry_safe(entry, safe, cache->lists[i], list,
                                  arc_entry_t)
+#endif
+#else
+    for (int i = 0; i < THRESHOLD; i++) {
+        if (list_empty(cache->lists[i]))
+            continue;
+        lfu_entry_t *entry, *safe;
+#ifdef __HAVE_TYPEOF
+        list_for_each_entry_safe(entry, safe, cache->lists[i], list)
+#else
+        list_for_each_entry_safe(entry, safe, cache->lists[i], list,
+                                 lfu_entry_t)
+#endif
 #endif
         callback(entry->value);
     }
+    mpool_destory(cache_mp);
     free(cache->map->ht_list_head);
     free(cache->map);
     free(cache);