#include <string.h>

#include "cache.h"
+ #include "mpool.h"

#define MIN(a, b) ((a < b) ? a : b)
#define GOLDEN_RATIO_32 0x61C88647
#define HASH(val) \
    (((val) * (GOLDEN_RATIO_32)) >> (32 - (cache_size_bits))) & (cache_size - 1)
+ /* THRESHOLD identifies hot spots: once the use count of a block exceeds
+  * THRESHOLD, the JIT compilation flow is triggered. */
+ #define THRESHOLD 1000
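
A rough sketch of how THRESHOLD shapes the LFU bookkeeping introduced below (illustrative only; the exact steps are in cache_get()/cache_put() further down):

/*
 *   counter 0 .. THRESHOLD-1 : the entry is filed on cache->lists[counter]
 *                              and the counter is then incremented
 *   counter >= THRESHOLD     : the entry stops migrating; the block is hot
 *                              and becomes a candidate for JIT compilation
 *   eviction                 : scan lists[0], lists[1], ... and drop the
 *                              least recently used entry of the first
 *                              non-empty (i.e. coldest) bucket
 */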

static uint32_t cache_size, cache_size_bits;
+ static struct mpool *cache_mp;

+ #if RV32_HAS(ARC)
/*
 * Adaptive Replacement Cache (ARC) improves the fundamental LRU strategy
 * by dividing the cache into two lists, T1 and T2. list T1 is for LRU
@@ -37,7 +43,7 @@ typedef enum {
    LFU_ghost_list,
    N_CACHE_LIST_TYPES
} cache_list_t;
-
+ #endif
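
A condensed sketch of the ARC transitions implied by the list types above (simplified; the precise bookkeeping lives in cache_get()/cache_put() further down):

/*
 *   miss everywhere              -> admit the entry into LRU_list (seen once)
 *   hit in LRU_list or LFU_list  -> move to the MRU end of LFU_list (reused)
 *   evicted from LRU_/LFU_list   -> remember the key in the matching ghost list
 *   hit in LRU_ghost_list        -> grow lru_capacity, re-admit into LFU_list
 *   hit in LFU_ghost_list        -> shrink lru_capacity, re-admit into LFU_list
 */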
struct list_head {
    struct list_head *prev, *next;
};
@@ -50,6 +56,7 @@ struct hlist_node {
    struct hlist_node *next, **pprev;
};

+ #if RV32_HAS(ARC)
/*
 * list maintains four cache lists T1, T2, B1, and B2.
 * ht_list maintains hashtable and improves the performance of cache searching.
@@ -61,17 +68,31 @@ typedef struct {
    struct list_head list;
    struct hlist_node ht_list;
} arc_entry_t;
+ #else /* !RV32_HAS(ARC) */
+ typedef struct {
+     void *value;
+     uint32_t key;
+     uint32_t frequency;
+     struct list_head list;
+     struct hlist_node ht_list;
+ } lfu_entry_t;
+ #endif
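
Both entry types embed their list and hash-table nodes directly, so a node pointer is mapped back to its owning entry via container_of() (wrapped by the list_entry()/hlist_entry_safe() helpers below). A minimal sketch, assuming the LFU build and a non-empty bucket:

/* take a node from the coldest frequency bucket ... */
struct list_head *node = cache->lists[0]->next;
/* ... and recover the entry that embeds it */
lfu_entry_t *entry = list_entry(node, lfu_entry_t, list);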

typedef struct {
    struct hlist_head *ht_list_head;
} hashtable_t;

typedef struct cache {
+ #if RV32_HAS(ARC)
    struct list_head *lists[N_CACHE_LIST_TYPES];
    uint32_t list_size[N_CACHE_LIST_TYPES];
+     uint32_t lru_capacity;
+ #else /* !RV32_HAS(ARC) */
+     struct list_head *lists[THRESHOLD];
+     uint32_t list_size;
+ #endif
    hashtable_t *map;
    uint32_t capacity;
-     uint32_t lru_capacity;
} cache_t;

static inline void INIT_LIST_HEAD(struct list_head *head)
@@ -80,6 +101,13 @@ static inline void INIT_LIST_HEAD(struct list_head *head)
    head->prev = head;
}

+ #if !RV32_HAS(ARC)
+ static inline int list_empty(const struct list_head *head)
+ {
+     return (head->next == head);
+ }
+ #endif
+
static inline void list_add(struct list_head *node, struct list_head *head)
{
    struct list_head *next = head->next;
@@ -107,6 +135,9 @@ static inline void list_del_init(struct list_head *node)

#define list_entry(node, type, member) container_of(node, type, member)

+ #define list_first_entry(head, type, member) \
+     list_entry((head)->next, type, member)
+
#define list_last_entry(head, type, member) \
    list_entry((head)->prev, type, member)

@@ -194,14 +225,15 @@ static inline void hlist_del_init(struct hlist_node *n)
         pos = hlist_entry_safe((pos)->member.next, type, member))
#endif

+
cache_t *cache_create(int size_bits)
{
    cache_t *cache = malloc(sizeof(cache_t));
    if (!cache)
        return NULL;
    cache_size_bits = size_bits;
    cache_size = 1 << size_bits;
-
+ #if RV32_HAS(ARC)
    for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
        cache->lists[i] = malloc(sizeof(struct list_head));
        INIT_LIST_HEAD(cache->lists[i]);
@@ -224,12 +256,41 @@ cache_t *cache_create(int size_bits)
    for (uint32_t i = 0; i < cache_size; i++) {
        INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
    }
+     cache->lru_capacity = cache_size / 2;
+     cache_mp =
+         mpool_create(cache_size * 2 * sizeof(arc_entry_t), sizeof(arc_entry_t));
+ #else /* !RV32_HAS(ARC) */
+     for (int i = 0; i < THRESHOLD; i++) {
+         cache->lists[i] = malloc(sizeof(struct list_head));
+         INIT_LIST_HEAD(cache->lists[i]);
+     }

+     cache->map = malloc(sizeof(hashtable_t));
+     if (!cache->map) {
+         free(cache->lists);
+         free(cache);
+         return NULL;
+     }
+     cache->map->ht_list_head = malloc(cache_size * sizeof(struct hlist_head));
+     if (!cache->map->ht_list_head) {
+         free(cache->map);
+         free(cache->lists);
+         free(cache);
+         return NULL;
+     }
+     for (uint32_t i = 0; i < cache_size; i++) {
+         INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
+     }
+     cache->list_size = 0;
+     cache_mp =
+         mpool_create(cache_size * sizeof(lfu_entry_t), sizeof(lfu_entry_t));
+ #endif
    cache->capacity = cache_size;
-     cache->lru_capacity = cache_size / 2;
    return cache;
}

+
+ #if RV32_HAS(ARC)
/* Rules of ARC
 * 1. size of LRU_list + size of LFU_list <= c
 * 2. size of LRU_list + size of LRU_ghost_list <= c
@@ -273,12 +334,14 @@ static inline void move_to_mru(cache_t *cache,
    list_del_init(&entry->list);
    list_add(&entry->list, cache->lists[type]);
}
+ #endif
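
The two rules quoted above translate into assertions over the list sizes; roughly (c being cache->capacity):

assert(cache->list_size[LRU_list] + cache->list_size[LFU_list] <= cache->capacity);
assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <=
       cache->capacity);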

void *cache_get(cache_t *cache, uint32_t key)
{
    if (!cache->capacity || hlist_empty(&cache->map->ht_list_head[HASH(key)]))
        return NULL;

+ #if RV32_HAS(ARC)
    arc_entry_t *entry = NULL;
#ifdef __HAVE_TYPEOF
    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list)
@@ -323,13 +386,38 @@ void *cache_get(cache_t *cache, uint32_t key)
    }

    CACHE_ASSERT(cache);
+ #else /* !RV32_HAS(ARC) */
+     lfu_entry_t *entry = NULL;
+ #ifdef __HAVE_TYPEOF
+     hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list)
+ #else
+     hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list,
+                           lfu_entry_t)
+ #endif
+     {
+         if (entry->key == key)
+             break;
+     }
+     if (!entry || entry->key != key)
+         return NULL;
+
+     /* Once the use count of a specific block exceeds the predetermined
+      * THRESHOLD, the block is dispatched to the code generator to emit C code,
+      * which the target compiler then compiles into machine code. */
+     if (entry->frequency < THRESHOLD) {
+         list_del_init(&entry->list);
+         list_add(&entry->list, cache->lists[entry->frequency++]);
+     }
+ #endif
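
The dispatch to the code generator itself is left to the caller; this path only maintains the per-entry counter. A helper along the following lines (hypothetical, not part of this file; it assumes <stdbool.h> and the __HAVE_TYPEOF form of hlist_for_each_entry) could expose the decision to the JIT driver:

/* Hypothetical helper: report whether a cached block has become hot. */
bool cache_hot(const cache_t *cache, uint32_t key)
{
    lfu_entry_t *entry = NULL;
    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list)
    {
        if (entry->key == key)
            return entry->frequency >= THRESHOLD;
    }
    return false;
}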
    /* return NULL if cache miss */
    return entry->value;
}

void *cache_put(cache_t *cache, uint32_t key, void *value)
{
    void *delete_value = NULL;
+ #if RV32_HAS(ARC)
    assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <=
           cache->capacity);
    /* Before adding a new element to the cache, we should check the status
@@ -343,7 +431,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
        list_del_init(&delete_target->list);
        hlist_del_init(&delete_target->ht_list);
        delete_value = delete_target->value;
-         free(delete_target);
+         mpool_free(cache_mp, delete_target);
        cache->list_size[LRU_ghost_list]--;
        if (cache->list_size[LRU_list] &&
            cache->list_size[LRU_list] >= cache->lru_capacity)
@@ -357,7 +445,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
-             free(delete_target);
+             mpool_free(cache_mp, delete_target);
            cache->list_size[LRU_list]--;
        }
    } else {
@@ -372,12 +460,12 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
-             free(delete_target);
+             mpool_free(cache_mp, delete_target);
            cache->list_size[LFU_ghost_list]--;
        }
        REPLACE_LIST(>, >=)
    }
-     arc_entry_t *new_entry = malloc(sizeof(arc_entry_t));
+     arc_entry_t *new_entry = mpool_alloc(cache_mp);
    new_entry->key = key;
    new_entry->value = value;
    /* check whether the whole cache has become LFU */
@@ -393,21 +481,61 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
    hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);

    CACHE_ASSERT(cache);
+ #else /* !RV32_HAS(ARC) */
+     assert(cache->list_size <= cache->capacity);
+     /* check whether the cache is full before adding a new entry */
+     if (cache->list_size == cache->capacity) {
+         for (int i = 0; i < THRESHOLD; i++) {
+             if (list_empty(cache->lists[i]))
+                 continue;
+             lfu_entry_t *delete_target =
+                 list_last_entry(cache->lists[i], lfu_entry_t, list);
+             list_del_init(&delete_target->list);
+             hlist_del_init(&delete_target->ht_list);
+             delete_value = delete_target->value;
+             cache->list_size--;
+             mpool_free(cache_mp, delete_target);
+             break;
+         }
+     }
+     lfu_entry_t *new_entry = mpool_alloc(cache_mp);
+     new_entry->key = key;
+     new_entry->value = value;
+     new_entry->frequency = 0;
+     list_add(&new_entry->list, cache->lists[new_entry->frequency++]);
+     cache->list_size++;
+     hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
+     assert(cache->list_size <= cache->capacity);
+ #endif
    return delete_value;
}

void cache_free(cache_t *cache, void (*callback)(void *))
{
+ #if RV32_HAS(ARC)
    for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
        arc_entry_t *entry, *safe;
#ifdef __HAVE_TYPEOF
        list_for_each_entry_safe (entry, safe, cache->lists[i], list)
#else
        list_for_each_entry_safe (entry, safe, cache->lists[i], list,
                                  arc_entry_t)
+ #endif
+ #else /* !RV32_HAS(ARC) */
+     for (int i = 0; i < THRESHOLD; i++) {
+         if (list_empty(cache->lists[i]))
+             continue;
+         lfu_entry_t *entry, *safe;
+ #ifdef __HAVE_TYPEOF
+         list_for_each_entry_safe (entry, safe, cache->lists[i], list)
+ #else
+         list_for_each_entry_safe (entry, safe, cache->lists[i], list,
+                                   lfu_entry_t)
+ #endif
#endif
            callback(entry->value);
    }
+     mpool_destory(cache_mp);
    free(cache->map->ht_list_head);
    free(cache->map);
    free(cache);
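
Taken together, the public interface is used roughly as follows (pc, block, and free_block() are placeholders for illustration):

cache_t *cache = cache_create(10);           /* 2^10 hash buckets, capacity 1024 */
void *evicted = cache_put(cache, pc, block); /* may hand back an evicted value   */
void *hit = cache_get(cache, pc);            /* bumps the entry's use count      */
cache_free(cache, free_block);               /* free_block() releases each value */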