Commit fad834c

Refactor ARC into LFU and integrate mpool
We have implemented an adaptive replacement cache (ARC) to reduce memory usage. However, the complexity of ARC's replacement policy can degrade overall performance, so we implemented an LFU cache, which performs better, while retaining the ARC implementation for future use. Additionally, we integrated the memory pool we developed to bound the memory usage of both caches. The statistics below, gathered while running the CoreMark benchmark, show that the LFU cache outperforms ARC.

| Test | CoreMark (Iterations/Sec) |
|------+---------------------------|
| ARC  | 1123.776                  |
| LFU  | 1155.174                  |
1 parent bcdc953 · commit fad834c

26 files changed: +3795 −19 lines
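For orientation, here is a minimal usage sketch of the cache interface this commit touches. The function signatures are taken from src/cache.c below; the key, the strdup'd payload, and the size_bits value are illustrative assumptions, not part of the commit:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include "cache.h" /* cache_create, cache_put, cache_get, cache_free */

    int main(void)
    {
        cache_t *cache = cache_create(10);  /* capacity = 1 << 10 entries */
        void *evicted = cache_put(cache, 0x8000, strdup("block"));
        if (evicted) /* cache_put returns the evicted value, or NULL */
            free(evicted);
        puts(cache_get(cache, 0x8000) ? "hit" : "miss"); /* NULL on miss */
        cache_free(cache, free); /* callback runs on each stored value */
        return 0;
    }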

Makefile
Lines changed: 4 additions & 0 deletions

@@ -40,6 +40,10 @@ ifeq ($(call has, EXT_F), 1)
 LDFLAGS += -lm
 endif
 
+# Enable adaptive replacement cache policy, default is LRU
+ENABLE_ARC ?= 0
+$(call set-feature, ARC)
+
 # Experimental SDL oriented system calls
 ENABLE_SDL ?= 1
 ifeq ($(call has, SDL), 1)
mk/tests.mk
Lines changed: 17 additions & 10 deletions

@@ -9,15 +9,22 @@ CACHE_OBJS := $(addprefix $(CACHE_BUILD_DIR)/, $(CACHE_OBJS))
 OBJS += $(CACHE_OBJS)
 deps += $(CACHE_OBJS:%.o=%.o.d)
 
-
+ifeq ($(ENABLE_ARC), 1)
+CACHE_CHECK_ELF_FILES := \
+	ARC/cache-new \
+	ARC/cache-put \
+	ARC/cache-get \
+	ARC/cache-lru-replace \
+	ARC/cache-lfu-replace \
+	ARC/cache-lru-ghost-replace \
+	ARC/cache-lfu-ghost-replace
+else
 CACHE_CHECK_ELF_FILES := \
-	cache-new \
-	cache-put \
-	cache-get \
-	cache-lru-replace \
-	cache-lfu-replace \
-	cache-lru-ghost-replace \
-	cache-lfu-ghost-replace
+	LFU/cache-new \
+	LFU/cache-put \
+	LFU/cache-get \
+	LFU/cache-lfu-replace
+endif
 
 CACHE_OUT = $(addprefix $(CACHE_BUILD_DIR)/, $(CACHE_CHECK_ELF_FILES:%=%.out))
 
@@ -39,9 +46,9 @@ $(CACHE_OUT): $(TARGET)
 
 $(TARGET): $(CACHE_OBJS)
 	$(VECHO) "  CC\t$@\n"
-	$(Q)$(CC) $^ build/cache.o -o $(CACHE_BUILD_DIR)/$(TARGET)
+	$(Q)$(CC) $^ build/cache.o build/mpool.o -o $(CACHE_BUILD_DIR)/$(TARGET)
 
 $(CACHE_BUILD_DIR)/%.o: $(CACHE_TEST_DIR)/%.c
 	$(VECHO) "  CC\t$@\n"
-	$(Q)mkdir -p $(dir $@)
+	$(Q)mkdir -p $(dir $@)/ARC $(dir $@)/LFU
 	$(Q)$(CC) -o $@ $(CFLAGS) -I./src -c -MMD -MF $@.d $<
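Two consequences of the refactoring surface here: the cache test binary now links build/mpool.o as well, since src/cache.c calls into the memory pool, and the expected-output fixtures are split into ARC/ and LFU/ subdirectories (created by the extended mkdir -p recipe), one set per replacement policy.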

src/cache.c
Lines changed: 132 additions & 9 deletions

@@ -10,14 +10,17 @@
 #include <string.h>
 
 #include "cache.h"
+#include "mpool.h"
 
 #define MIN(a, b) ((a < b) ? a : b)
 #define GOLDEN_RATIO_32 0x61C88647
 #define HASH(val) \
     (((val) * (GOLDEN_RATIO_32)) >> (32 - (cache_size_bits))) & (cache_size - 1)
 
 static uint32_t cache_size, cache_size_bits;
-
+struct mpool *cache_mp;
+#define ARC
+#ifdef ARC
 /*
  * Adaptive Replacement Cache (ARC) improves the fundamental LRU strategy
  * by dividing the cache into two lists, T1 and T2. list T1 is for LRU
@@ -30,14 +33,15 @@ static uint32_t cache_size, cache_size_bits;
  * we increase T1's size while decreasing T2. But, if the cache hit occurs in
  * B2, we would increase the size of T2 and decrease the size of T1.
  */
+
 typedef enum {
     LRU_list,
     LFU_list,
     LRU_ghost_list,
     LFU_ghost_list,
     N_CACHE_LIST_TYPES
 } cache_list_t;
-
+#endif
 struct list_head {
     struct list_head *prev, *next;
 };
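The cache_list_t enum is the in-code naming for ARC's four lists described in the comment above: LRU_list and LFU_list are the resident lists T1 and T2, while LRU_ghost_list and LFU_ghost_list are the history lists B1 and B2 that remember recently evicted keys and drive the adaptation of the two sizes.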
@@ -50,6 +54,7 @@ struct hlist_node {
     struct hlist_node *next, **pprev;
 };
 
+#if RV32_HAS(ARC)
 /*
  * list maintains four cache lists T1, T2, B1, and B2.
  * ht_list maintains hashtable and improves the performance of cache searching.
@@ -61,17 +66,31 @@ typedef struct {
     struct list_head list;
     struct hlist_node ht_list;
 } arc_entry_t;
+#else
+typedef struct {
+    void *value;
+    uint32_t key;
+    uint32_t frequency;
+    struct list_head list;
+    struct hlist_node ht_list;
+} lfu_entry_t;
+#endif
 
 typedef struct {
     struct hlist_head *ht_list_head;
 } hashtable_t;
 
 typedef struct cache {
+#if RV32_HAS(ARC)
     struct list_head *lists[N_CACHE_LIST_TYPES];
     uint32_t list_size[N_CACHE_LIST_TYPES];
+    uint32_t lru_capacity;
+#else
+    struct list_head *lists[1000];
+    uint32_t list_size;
+#endif
     hashtable_t *map;
     uint32_t capacity;
-    uint32_t lru_capacity;
 } cache_t;
 
 static inline void INIT_LIST_HEAD(struct list_head *head)
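In the LFU configuration, the pair of adaptive lists is replaced by an array of 1000 frequency buckets: an entry whose frequency counter is f lives on lists[f - 1], so a new entry lands in lists[0], each hit promotes it one bucket, and eviction scans upward from bucket 0 so the least frequently used entries go first. A fixed bucket array keeps insertion and promotion O(1); the price is the hard 1000 cap on the tracked frequency (and 1000 list-head allocations per cache).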
@@ -80,6 +99,11 @@ static inline void INIT_LIST_HEAD(struct list_head *head)
     head->prev = head;
 }
 
+static inline int list_empty(const struct list_head *head)
+{
+    return (head->next == head);
+}
+
 static inline void list_add(struct list_head *node, struct list_head *head)
 {
     struct list_head *next = head->next;
@@ -107,6 +131,9 @@ static inline void list_del_init(struct list_head *node)
 
 #define list_entry(node, type, member) container_of(node, type, member)
 
+#define list_first_entry(head, type, member) \
+    list_entry((head)->next, type, member)
+
 #define list_last_entry(head, type, member) \
     list_entry((head)->prev, type, member)
 
@@ -194,14 +221,15 @@ static inline void hlist_del_init(struct hlist_node *n)
         pos = hlist_entry_safe((pos)->member.next, type, member))
 #endif
 
+
 cache_t *cache_create(int size_bits)
 {
     cache_t *cache = malloc(sizeof(cache_t));
     if (!cache)
         return NULL;
     cache_size_bits = size_bits;
     cache_size = 1 << size_bits;
-
+#if RV32_HAS(ARC)
     for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
         cache->lists[i] = malloc(sizeof(struct list_head));
         INIT_LIST_HEAD(cache->lists[i]);
@@ -224,12 +252,41 @@ cache_t *cache_create(int size_bits)
     for (uint32_t i = 0; i < cache_size; i++) {
         INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
     }
+    cache->lru_capacity = cache_size / 2;
+    cache_mp =
+        mpool_create(cache_size * 2 * sizeof(arc_entry_t), sizeof(arc_entry_t));
+#else
+    for (int i = 0; i < 1000; i++) {
+        cache->lists[i] = malloc(sizeof(struct list_head));
+        INIT_LIST_HEAD(cache->lists[i]);
+    }
 
+    cache->map = malloc(sizeof(hashtable_t));
+    if (!cache->map) {
+        free(cache->lists);
+        free(cache);
+        return NULL;
+    }
+    cache->map->ht_list_head = malloc(cache_size * sizeof(struct hlist_head));
+    if (!cache->map->ht_list_head) {
+        free(cache->map);
+        free(cache->lists);
+        free(cache);
+        return NULL;
+    }
+    for (uint32_t i = 0; i < cache_size; i++) {
+        INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
+    }
+    cache->list_size = 0;
+    cache_mp =
+        mpool_create(cache_size * sizeof(lfu_entry_t), sizeof(lfu_entry_t));
+#endif
     cache->capacity = cache_size;
-    cache->lru_capacity = cache_size / 2;
     return cache;
 }
 
+
+#if RV32_HAS(ARC)
 /* Rules of ARC
  * 1. size of LRU_list + size of LFU_list <= c
  * 2. size of LRU_list + size of LRU_ghost_list <= c
@@ -273,12 +330,14 @@ static inline void move_to_mru(cache_t *cache,
     list_del_init(&entry->list);
     list_add(&entry->list, cache->lists[type]);
 }
+#endif
 
 void *cache_get(cache_t *cache, uint32_t key)
 {
     if (!cache->capacity || hlist_empty(&cache->map->ht_list_head[HASH(key)]))
         return NULL;
 
+#if RV32_HAS(ARC)
     arc_entry_t *entry = NULL;
 #ifdef __HAVE_TYPEOF
     hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list)
@@ -323,13 +382,35 @@ void *cache_get(cache_t *cache, uint32_t key)
     }
 
     CACHE_ASSERT(cache);
+#else
+    lfu_entry_t *entry = NULL;
+#ifdef __HAVE_TYPEOF
+    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list)
+#else
+    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list,
+                          lfu_entry_t)
+#endif
+    {
+        if (entry->key == key)
+            break;
+    }
+    if (!entry || entry->key != key)
+        return NULL;
+
+    /* We would translate the block with a frequency of more than 1000 */
+    if (entry->frequency < 1000) {
+        list_del_init(&entry->list);
+        list_add(&entry->list, cache->lists[entry->frequency++]);
+    }
+#endif
     /* return NULL if cache miss */
     return entry->value;
 }
 
 void *cache_put(cache_t *cache, uint32_t key, void *value)
 {
     void *delete_value = NULL;
+#if RV32_HAS(ARC)
     assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <=
            cache->capacity);
     /* Before adding new element to cach, we should check the status
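In the LFU lookup path above, a hit moves the entry from lists[frequency - 1] to lists[frequency] and bumps the counter, which saturates at 1000 so hot entries simply stay in the top bucket. The comment about translating blocks with a frequency of more than 1000 suggests the counter doubles as a hotness signal for deciding when a block is worth translating, though that decision is not made in this function.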
@@ -343,7 +424,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
             list_del_init(&delete_target->list);
             hlist_del_init(&delete_target->ht_list);
             delete_value = delete_target->value;
-            free(delete_target);
+            mpool_free(cache_mp, delete_target);
             cache->list_size[LRU_ghost_list]--;
             if (cache->list_size[LRU_list] &&
                 cache->list_size[LRU_list] >= cache->lru_capacity)
@@ -357,7 +438,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
             list_del_init(&delete_target->list);
             hlist_del_init(&delete_target->ht_list);
             delete_value = delete_target->value;
-            free(delete_target);
+            mpool_free(cache_mp, delete_target);
             cache->list_size[LRU_list]--;
         }
     } else {
@@ -372,12 +453,12 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
             list_del_init(&delete_target->list);
             hlist_del_init(&delete_target->ht_list);
             delete_value = delete_target->value;
-            free(delete_target);
+            mpool_free(cache_mp, delete_target);
             cache->list_size[LFU_ghost_list]--;
         }
         REPLACE_LIST(>, >=)
     }
-    arc_entry_t *new_entry = malloc(sizeof(arc_entry_t));
+    arc_entry_t *new_entry = mpool_alloc(cache_mp);
     new_entry->key = key;
     new_entry->value = value;
     /* check if all cache become LFU */
@@ -393,21 +474,63 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
     hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
 
     CACHE_ASSERT(cache);
+#else
+    assert(cache->list_size <= cache->capacity);
+    /* Before adding new element to cach, we should check the status
+     * of cache.
+     */
+    if (cache->list_size == cache->capacity) {
+        for (int i = 0; i < 1000; i++) {
+            if (!list_empty(cache->lists[i])) {
+                lfu_entry_t *delete_target =
+                    list_last_entry(cache->lists[i], lfu_entry_t, list);
+                list_del_init(&delete_target->list);
+                hlist_del_init(&delete_target->ht_list);
+                delete_value = delete_target->value;
+                cache->list_size--;
+                mpool_free(cache_mp, delete_target);
+                break;
+            }
+        }
+    }
+    lfu_entry_t *new_entry = mpool_alloc(cache_mp);
+    new_entry->key = key;
+    new_entry->value = value;
+    new_entry->frequency = 0;
+    list_add(&new_entry->list, cache->lists[new_entry->frequency++]);
+    cache->list_size++;
+    hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
+    assert(cache->list_size <= cache->capacity);
+#endif
    return delete_value;
 }
 
 void cache_free(cache_t *cache, void (*callback)(void *))
 {
+#if RV32_HAS(ARC)
     for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
         arc_entry_t *entry, *safe;
 #ifdef __HAVE_TYPEOF
         list_for_each_entry_safe (entry, safe, cache->lists[i], list)
 #else
         list_for_each_entry_safe (entry, safe, cache->lists[i], list,
                                   arc_entry_t)
+#endif
+#else
+    for (int i = 0; i < 1000; i++) {
+        if (list_empty(cache->lists[i]))
+            continue;
+        lfu_entry_t *entry, *safe;
+#ifdef __HAVE_TYPEOF
+        list_for_each_entry_safe (entry, safe, cache->lists[i], list)
+#else
+        list_for_each_entry_safe (entry, safe, cache->lists[i], list,
+                                  lfu_entry_t)
+#endif
 #endif
             callback(entry->value);
     }
+    mpool_destory(cache_mp);
     free(cache->map->ht_list_head);
     free(cache->map);
     free(cache);
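All entry allocations now come from the imported memory pool rather than per-entry malloc/free, which is what actually bounds the caches' memory: cache_create sizes the pool for cache_size * 2 arc_entry_t slots under ARC (resident plus ghost entries) or cache_size lfu_entry_t slots under LFU, and cache_free releases it via mpool_destory (as the function is spelled in this codebase). A sketch of the pool calls as used here; the prototypes are inferred from these call sites, since src/mpool.h is not shown on this page:

    /* assumed shapes, inferred from the call sites in this diff */
    struct mpool *mp = mpool_create(pool_size, chunk_size); /* fixed arena */
    void *entry = mpool_alloc(mp);  /* take one fixed-size chunk, O(1) */
    mpool_free(mp, entry);          /* hand the chunk back to the pool */
    mpool_destory(mp);              /* release the whole arena */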

src/feature.h
Lines changed: 5 additions & 0 deletions

@@ -48,6 +48,11 @@
 #define RV32_FEATURE_GDBSTUB 1
 #endif
 
+/* Import adaptive replacement cache to manage block */
+#ifndef RV32_FEATURE_ARC
+#define RV32_FEATURE_ARC 1
+#endif
+
 /* Feature test macro */
 #define RV32_HAS(x) RV32_FEATURE_##x
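The block follows the existing feature-flag pattern of this header: the build normally supplies RV32_FEATURE_ARC via `$(call set-feature, ARC)` in the Makefile, and the #ifndef fallback applies only when it does not. A compact view of how the macro resolves in src/cache.c:

    /* RV32_HAS(ARC) expands to RV32_FEATURE_ARC, i.e. 0 or 1 */
    #if RV32_HAS(ARC)
    /* ARC entry types and list handling are compiled */
    #else
    /* LFU frequency buckets are compiled instead */
    #endif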

tests/cache/ARC/cache-get.expect
Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+NEW CACHE
+NULL
+NULL
+3
+FREE CACHE

File renamed without changes.
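Each .expect file is the stdout a cache unit test must reproduce; read against the test above, this one appears to create a cache (NEW CACHE), miss twice (NULL, NULL), hit an entry whose stored value prints as 3, and release the cache (FREE CACHE).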
