1010#include <string.h>
1111
1212#include "cache.h"
13+ #include "mpool.h"
1314
/* Return the smaller of two values.
 * Each argument and the whole expansion are parenthesized so the macro
 * stays correct when expressions are passed in or when it is embedded in
 * a larger expression. Beware: each argument may be evaluated twice, so
 * do not pass expressions with side effects (e.g. i++).
 */
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
1516#define GOLDEN_RATIO_32 0x61C88647
/* Multiplicative (Fibonacci) hash: multiply the key by the 32-bit
 * golden-ratio constant, keep the top cache_size_bits bits, then mask
 * into the table. The full expansion is parenthesized so HASH(k) can be
 * embedded safely in larger expressions. Relies on the file-scope
 * cache_size / cache_size_bits being initialized by cache_create().
 * NOTE(review): for a uint32_t operand the shift already bounds the
 * result below cache_size, making the final mask defensive — confirm
 * both are intended (the mask also guards wider operand types).
 */
#define HASH(val) \
    ((((val) * (GOLDEN_RATIO_32)) >> (32 - (cache_size_bits))) & (cache_size - 1))
1819
/* Hash-table geometry: cache_size == 1 << cache_size_bits; both are set
 * once in cache_create() and read by the HASH() macro. */
static uint32_t cache_size, cache_size_bits;
/* Memory pool backing cache-entry allocations; created in cache_create()
 * and released in cache_free().
 * NOTE(review): not static — confirm no other translation unit needs it;
 * otherwise file-local (static) linkage would be preferable. */
struct mpool *cache_mp;
22+ #define ARC
23+ #ifdef ARC
2124/*
2225 * Adaptive Replacement Cache (ARC) improves the fundamental LRU strategy
2326 * by dividing the cache into two lists, T1 and T2. list T1 is for LRU
@@ -30,14 +33,15 @@ static uint32_t cache_size, cache_size_bits;
3033 * we increase T1's size while decreasing T2. But, if the cache hit occurs in
3134 * B2, we would increase the size of T2 and decrease the size of T1.
3235 */
36+
/* The four ARC lists: two resident caches (T1/T2) and the two ghost
 * (history-only) lists (B1/B2) described in the comment above. */
typedef enum {
    LRU_list,       /* T1: entries referenced once recently */
    LFU_list,       /* T2: entries referenced at least twice */
    LRU_ghost_list, /* B1: history of entries evicted from T1 */
    LFU_ghost_list, /* B2: history of entries evicted from T2 */
    N_CACHE_LIST_TYPES
} cache_list_t;
40-
44+ #endif
/* Intrusive circular doubly-linked list node: embedded inside cache
 * entries and recovered with list_entry()/container_of(). */
struct list_head {
    struct list_head *prev, *next;
};
@@ -50,6 +54,7 @@ struct hlist_node {
5054 struct hlist_node * next , * * pprev ;
5155};
5256
57+ #if RV32_HAS (ARC )
5358/*
5459 * list maintains four cache lists T1, T2, B1, and B2.
5560 * ht_list maintains hashtable and improves the performance of cache searching.
@@ -61,17 +66,31 @@ typedef struct {
6166 struct list_head list ;
6267 struct hlist_node ht_list ;
6368} arc_entry_t ;
69+ #else
typedef struct {
    void *value;              /* cached object (opaque to the cache) */
    uint32_t key;             /* lookup key, hashed via HASH() */
    uint32_t frequency;       /* access count; selects the frequency list */
    struct list_head list;    /* linkage in the per-frequency LFU list */
    struct hlist_node ht_list; /* linkage in the hash-table bucket */
} lfu_entry_t;
77+ #endif
6478
/* Hash table: an array of cache_size bucket heads, indexed by HASH(key),
 * used to make cache lookups O(1) on average. */
typedef struct {
    struct hlist_head *ht_list_head;
} hashtable_t;
6882
typedef struct cache {
#if RV32_HAS(ARC)
    /* T1/T2/B1/B2 lists and their current sizes. */
    struct list_head *lists[N_CACHE_LIST_TYPES];
    uint32_t list_size[N_CACHE_LIST_TYPES];
    /* Adaptive target size of the LRU (T1) side. */
    uint32_t lru_capacity;
#else
    /* One list per access frequency (index 0..999).
     * NOTE(review): 1000 is a magic threshold repeated in cache_get()/
     * cache_put()/cache_free() — consider a named constant. */
    struct list_head *lists[1000];
    uint32_t list_size; /* total number of resident entries */
#endif
    hashtable_t *map;  /* key -> entry hash table */
    uint32_t capacity; /* maximum number of resident entries */
} cache_t;
7695
7796static inline void INIT_LIST_HEAD (struct list_head * head )
@@ -80,6 +99,11 @@ static inline void INIT_LIST_HEAD(struct list_head *head)
8099 head -> prev = head ;
81100}
82101
102+ static inline int list_empty (const struct list_head * head )
103+ {
104+ return (head -> next == head );
105+ }
106+
83107static inline void list_add (struct list_head * node , struct list_head * head )
84108{
85109 struct list_head * next = head -> next ;
@@ -107,6 +131,9 @@ static inline void list_del_init(struct list_head *node)
107131
/* Recover the enclosing structure from an embedded list_head node. */
#define list_entry(node, type, member) container_of(node, type, member)

/* Entry right after head — the most recently inserted (MRU) position. */
#define list_first_entry(head, type, member) \
    list_entry((head)->next, type, member)

/* Entry right before head — the oldest (LRU) position; used to select
 * eviction victims. */
#define list_last_entry(head, type, member) \
    list_entry((head)->prev, type, member)
112139
@@ -194,14 +221,15 @@ static inline void hlist_del_init(struct hlist_node *n)
194221 pos = hlist_entry_safe((pos)->member.next, type, member))
195222#endif
196223
224+
197225cache_t * cache_create (int size_bits )
198226{
199227 cache_t * cache = malloc (sizeof (cache_t ));
200228 if (!cache )
201229 return NULL ;
202230 cache_size_bits = size_bits ;
203231 cache_size = 1 << size_bits ;
204-
232+ #if RV32_HAS ( ARC )
205233 for (int i = 0 ; i < N_CACHE_LIST_TYPES ; i ++ ) {
206234 cache -> lists [i ] = malloc (sizeof (struct list_head ));
207235 INIT_LIST_HEAD (cache -> lists [i ]);
@@ -224,12 +252,41 @@ cache_t *cache_create(int size_bits)
224252 for (uint32_t i = 0 ; i < cache_size ; i ++ ) {
225253 INIT_HLIST_HEAD (& cache -> map -> ht_list_head [i ]);
226254 }
255+ cache -> lru_capacity = cache_size / 2 ;
256+ cache_mp =
257+ mpool_create (cache_size * 2 * sizeof (arc_entry_t ), sizeof (arc_entry_t ));
258+ #else
259+ for (int i = 0 ; i < 1000 ; i ++ ) {
260+ cache -> lists [i ] = malloc (sizeof (struct list_head ));
261+ INIT_LIST_HEAD (cache -> lists [i ]);
262+ }
227263
264+ cache -> map = malloc (sizeof (hashtable_t ));
265+ if (!cache -> map ) {
266+ free (cache -> lists );
267+ free (cache );
268+ return NULL ;
269+ }
270+ cache -> map -> ht_list_head = malloc (cache_size * sizeof (struct hlist_head ));
271+ if (!cache -> map -> ht_list_head ) {
272+ free (cache -> map );
273+ free (cache -> lists );
274+ free (cache );
275+ return NULL ;
276+ }
277+ for (uint32_t i = 0 ; i < cache_size ; i ++ ) {
278+ INIT_HLIST_HEAD (& cache -> map -> ht_list_head [i ]);
279+ }
280+ cache -> list_size = 0 ;
281+ cache_mp =
282+ mpool_create (cache_size * sizeof (lfu_entry_t ), sizeof (lfu_entry_t ));
283+ #endif
228284 cache -> capacity = cache_size ;
229- cache -> lru_capacity = cache_size / 2 ;
230285 return cache ;
231286}
232287
288+
289+ #if RV32_HAS (ARC )
233290/* Rules of ARC
234291 * 1. size of LRU_list + size of LFU_list <= c
235292 * 2. size of LRU_list + size of LRU_ghost_list <= c
@@ -273,12 +330,14 @@ static inline void move_to_mru(cache_t *cache,
273330 list_del_init (& entry -> list );
274331 list_add (& entry -> list , cache -> lists [type ]);
275332}
333+ #endif
276334
277335void * cache_get (cache_t * cache , uint32_t key )
278336{
279337 if (!cache -> capacity || hlist_empty (& cache -> map -> ht_list_head [HASH (key )]))
280338 return NULL ;
281339
340+ #if RV32_HAS (ARC )
282341 arc_entry_t * entry = NULL ;
283342#ifdef __HAVE_TYPEOF
284343 hlist_for_each_entry (entry , & cache -> map -> ht_list_head [HASH (key )], ht_list )
@@ -323,13 +382,35 @@ void *cache_get(cache_t *cache, uint32_t key)
323382 }
324383
325384 CACHE_ASSERT (cache );
385+ #else
386+ lfu_entry_t * entry = NULL ;
387+ #ifdef __HAVE_TYPEOF
388+ hlist_for_each_entry (entry , & cache -> map -> ht_list_head [HASH (key )], ht_list )
389+ #else
390+ hlist_for_each_entry (entry , & cache -> map -> ht_list_head [HASH (key )], ht_list ,
391+ lfu_entry_t )
392+ #endif
393+ {
394+ if (entry -> key == key )
395+ break ;
396+ }
397+ if (!entry || entry -> key != key )
398+ return NULL ;
399+
    /* Blocks whose frequency has reached 1000 would be translated and are
     * no longer promoted; below the threshold, move the entry up to its
     * next frequency list. */
401+ if (entry -> frequency < 1000 ) {
402+ list_del_init (& entry -> list );
403+ list_add (& entry -> list , cache -> lists [entry -> frequency ++ ]);
404+ }
405+ #endif
    /* Cache hit: return the stored value (misses already returned NULL). */
327407 return entry -> value ;
328408}
329409
330410void * cache_put (cache_t * cache , uint32_t key , void * value )
331411{
332412 void * delete_value = NULL ;
413+ #if RV32_HAS (ARC )
333414 assert (cache -> list_size [LRU_list ] + cache -> list_size [LRU_ghost_list ] <=
334415 cache -> capacity );
335416 /* Before adding new element to cach, we should check the status
@@ -343,7 +424,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
343424 list_del_init (& delete_target -> list );
344425 hlist_del_init (& delete_target -> ht_list );
345426 delete_value = delete_target -> value ;
346- free ( delete_target );
427+ mpool_free ( cache_mp , delete_target );
347428 cache -> list_size [LRU_ghost_list ]-- ;
348429 if (cache -> list_size [LRU_list ] &&
349430 cache -> list_size [LRU_list ] >= cache -> lru_capacity )
@@ -357,7 +438,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
357438 list_del_init (& delete_target -> list );
358439 hlist_del_init (& delete_target -> ht_list );
359440 delete_value = delete_target -> value ;
360- free ( delete_target );
441+ mpool_free ( cache_mp , delete_target );
361442 cache -> list_size [LRU_list ]-- ;
362443 }
363444 } else {
@@ -372,12 +453,12 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
372453 list_del_init (& delete_target -> list );
373454 hlist_del_init (& delete_target -> ht_list );
374455 delete_value = delete_target -> value ;
375- free ( delete_target );
456+ mpool_free ( cache_mp , delete_target );
376457 cache -> list_size [LFU_ghost_list ]-- ;
377458 }
378459 REPLACE_LIST (> , >=)
379460 }
380- arc_entry_t * new_entry = malloc ( sizeof ( arc_entry_t ) );
461+ arc_entry_t * new_entry = mpool_alloc ( cache_mp );
381462 new_entry -> key = key ;
382463 new_entry -> value = value ;
383464 /* check if all cache become LFU */
@@ -393,21 +474,63 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
393474 hlist_add_head (& new_entry -> ht_list , & cache -> map -> ht_list_head [HASH (key )]);
394475
395476 CACHE_ASSERT (cache );
477+ #else
478+ assert (cache -> list_size <= cache -> capacity );
    /* Before adding a new element, check whether the cache is full and
     * evict the least-frequently-used entry if necessary.
     */
482+ if (cache -> list_size == cache -> capacity ) {
483+ for (int i = 0 ; i < 1000 ; i ++ ) {
484+ if (!list_empty (cache -> lists [i ])) {
485+ lfu_entry_t * delete_target =
486+ list_last_entry (cache -> lists [i ], lfu_entry_t , list );
487+ list_del_init (& delete_target -> list );
488+ hlist_del_init (& delete_target -> ht_list );
489+ delete_value = delete_target -> value ;
490+ cache -> list_size -- ;
491+ mpool_free (cache_mp , delete_target );
492+ break ;
493+ }
494+ }
495+ }
496+ lfu_entry_t * new_entry = mpool_alloc (cache_mp );
497+ new_entry -> key = key ;
498+ new_entry -> value = value ;
499+ new_entry -> frequency = 0 ;
500+ list_add (& new_entry -> list , cache -> lists [new_entry -> frequency ++ ]);
501+ cache -> list_size ++ ;
502+ hlist_add_head (& new_entry -> ht_list , & cache -> map -> ht_list_head [HASH (key )]);
503+ assert (cache -> list_size <= cache -> capacity );
504+ #endif
396505 return delete_value ;
397506}
398507
399508void cache_free (cache_t * cache , void (* callback )(void * ))
400509{
510+ #if RV32_HAS (ARC )
401511 for (int i = 0 ; i < N_CACHE_LIST_TYPES ; i ++ ) {
402512 arc_entry_t * entry , * safe ;
403513#ifdef __HAVE_TYPEOF
404514 list_for_each_entry_safe (entry , safe , cache -> lists [i ], list )
405515#else
406516 list_for_each_entry_safe (entry , safe , cache -> lists [i ], list ,
407517 arc_entry_t )
518+ #endif
519+ #else
520+ for (int i = 0 ; i < 1000 ; i ++ ) {
521+ if (list_empty (cache -> lists [i ]))
522+ continue ;
523+ lfu_entry_t * entry , * safe ;
524+ #ifdef __HAVE_TYPEOF
525+ list_for_each_entry_safe (entry , safe , cache -> lists [i ], list )
526+ #else
527+ list_for_each_entry_safe (entry , safe , cache -> lists [i ], list ,
528+ lfu_entry_t )
529+ #endif
408530#endif
409531 callback (entry -> value );
410532 }
533+ mpool_destory (cache_mp );
411534 free (cache -> map -> ht_list_head );
412535 free (cache -> map );
413536 free (cache );
0 commit comments