
Commit 63b97df

Refactor ARC into LFU and integrate mpool
We previously implemented an adaptive replacement cache (ARC) to reduce memory usage. However, the complexity of ARC's replacement policy can degrade overall performance, so we implemented an LFU cache, which performs better; ARC is retained for future use. In addition, we integrated the memory pool we developed to bound the memory usage of both caches. The statistics below, collected while running the CoreMark benchmark, show that the LFU cache outperforms ARC.

| Test | CoreMark (Iterations/Sec) |
|------|---------------------------|
| ARC  | 1123.776                  |
| LFU  | 1155.174                  |
Parent: bcdc953

26 files changed: +3796 −18 lines

Makefile

Lines changed: 4 additions & 0 deletions
@@ -40,6 +40,10 @@ ifeq ($(call has, EXT_F), 1)
 LDFLAGS += -lm
 endif
 
+# Enable adaptive replacement cache policy, default is LRU
+ENABLE_ARC ?= 1
+$(call set-feature, ARC)
+
 # Experimental SDL oriented system calls
 ENABLE_SDL ?= 1
 ifeq ($(call has, SDL), 1)
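Since `?=` assigns only when the variable is not already set, ARC stays enabled by default here; building with `make ENABLE_ARC=0` selects the new LFU cache instead.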

mk/tests.mk

Lines changed: 18 additions & 10 deletions
@@ -9,15 +9,23 @@ CACHE_OBJS := $(addprefix $(CACHE_BUILD_DIR)/, $(CACHE_OBJS))
 OBJS += $(CACHE_OBJS)
 deps += $(CACHE_OBJS:%.o=%.o.d)
 
-
+# Check adaptive replacement cache policy is enabled or not, default is LRU
+ifeq ($(ENABLE_ARC), 1)
+CACHE_CHECK_ELF_FILES := \
+    arc/cache-new \
+    arc/cache-put \
+    arc/cache-get \
+    arc/cache-lru-replace \
+    arc/cache-lfu-replace \
+    arc/cache-lru-ghost-replace \
+    arc/cache-lfu-ghost-replace
+else
 CACHE_CHECK_ELF_FILES := \
-    cache-new \
-    cache-put \
-    cache-get \
-    cache-lru-replace \
-    cache-lfu-replace \
-    cache-lru-ghost-replace \
-    cache-lfu-ghost-replace
+    lfu/cache-new \
+    lfu/cache-put \
+    lfu/cache-get \
+    lfu/cache-lfu-replace
+endif
 
 CACHE_OUT = $(addprefix $(CACHE_BUILD_DIR)/, $(CACHE_CHECK_ELF_FILES:%=%.out))

@@ -39,9 +47,9 @@ $(CACHE_OUT): $(TARGET)
 
 $(TARGET): $(CACHE_OBJS)
 	$(VECHO) " CC\t$@\n"
-	$(Q)$(CC) $^ build/cache.o -o $(CACHE_BUILD_DIR)/$(TARGET)
+	$(Q)$(CC) $^ build/cache.o build/mpool.o -o $(CACHE_BUILD_DIR)/$(TARGET)
 
 $(CACHE_BUILD_DIR)/%.o: $(CACHE_TEST_DIR)/%.c
 	$(VECHO) " CC\t$@\n"
-	$(Q)mkdir -p $(dir $@)
+	$(Q)mkdir -p $(dir $@)/arc $(dir $@)/lfu
 	$(Q)$(CC) -o $@ $(CFLAGS) -I./src -c -MMD -MF $@.d $<
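The ARC and LFU configurations now use separate test-ELF lists under `arc/` and `lfu/`, which is why the pattern rule creates both subdirectories and the link step pulls in `build/mpool.o` alongside `build/cache.o`.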

src/cache.c

Lines changed: 132 additions & 8 deletions
@@ -10,14 +10,18 @@
 #include <string.h>
 
 #include "cache.h"
+#include "mpool.h"
 
 #define MIN(a, b) ((a < b) ? a : b)
 #define GOLDEN_RATIO_32 0x61C88647
 #define HASH(val) \
     (((val) * (GOLDEN_RATIO_32)) >> (32 - (cache_size_bits))) & (cache_size - 1)
+#define THRESHOLD 1000
 
 static uint32_t cache_size, cache_size_bits;
+static struct mpool *cache_mp;
 
+#if RV32_HAS(ARC)
 /*
  * Adaptive Replacement Cache (ARC) improves the fundamental LRU strategy
  * by dividing the cache into two lists, T1 and T2. list T1 is for LRU

@@ -30,14 +34,15 @@ static uint32_t cache_size, cache_size_bits;
  * we increase T1's size while decreasing T2. But, if the cache hit occurs in
  * B2, we would increase the size of T2 and decrease the size of T1.
  */
+
 typedef enum {
     LRU_list,
     LFU_list,
     LRU_ghost_list,
     LFU_ghost_list,
     N_CACHE_LIST_TYPES
 } cache_list_t;
-
+#endif
 struct list_head {
     struct list_head *prev, *next;
 };
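To make the adaptation rule described in the comment above concrete, here is a minimal sketch, assuming the `lru_capacity` field this commit adds to `cache_t` further down; the helper name is hypothetical, and the real adjustment happens inline in the cache routines rather than in a function like this:

```c
/* Hypothetical helper illustrating ARC adaptation; not part of this
 * commit. A hit in B1 (LRU ghost) means a recently-used entry was
 * evicted too soon, so the LRU share grows; a hit in B2 (LFU ghost)
 * shrinks it in favor of the frequency side. */
static void adapt_on_ghost_hit(cache_t *cache, cache_list_t ghost)
{
    if (ghost == LRU_ghost_list && cache->lru_capacity < cache->capacity)
        cache->lru_capacity++; /* favor recency: grow T1, shrink T2 */
    else if (ghost == LFU_ghost_list && cache->lru_capacity > 0)
        cache->lru_capacity--; /* favor frequency: grow T2, shrink T1 */
}
```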
@@ -50,6 +55,7 @@ struct hlist_node {
     struct hlist_node *next, **pprev;
 };
 
+#if RV32_HAS(ARC)
 /*
  * list maintains four cache lists T1, T2, B1, and B2.
  * ht_list maintains hashtable and improves the performance of cache searching.

@@ -61,17 +67,31 @@ typedef struct {
     struct list_head list;
     struct hlist_node ht_list;
 } arc_entry_t;
+#else
+typedef struct {
+    void *value;
+    uint32_t key;
+    uint32_t frequency;
+    struct list_head list;
+    struct hlist_node ht_list;
+} lfu_entry_t;
+#endif
 
 typedef struct {
     struct hlist_head *ht_list_head;
 } hashtable_t;
 
 typedef struct cache {
+#if RV32_HAS(ARC)
     struct list_head *lists[N_CACHE_LIST_TYPES];
     uint32_t list_size[N_CACHE_LIST_TYPES];
+    uint32_t lru_capacity;
+#else
+    struct list_head *lists[THRESHOLD];
+    uint32_t list_size;
+#endif
     hashtable_t *map;
     uint32_t capacity;
-    uint32_t lru_capacity;
 } cache_t;
 
 static inline void INIT_LIST_HEAD(struct list_head *head)

@@ -80,6 +100,11 @@ static inline void INIT_LIST_HEAD(struct list_head *head)
     head->prev = head;
 }
 
+static inline int list_empty(const struct list_head *head)
+{
+    return (head->next == head);
+}
+
 static inline void list_add(struct list_head *node, struct list_head *head)
 {
     struct list_head *next = head->next;

@@ -107,6 +132,9 @@ static inline void list_del_init(struct list_head *node)
 
 #define list_entry(node, type, member) container_of(node, type, member)
 
+#define list_first_entry(head, type, member) \
+    list_entry((head)->next, type, member)
+
 #define list_last_entry(head, type, member) \
     list_entry((head)->prev, type, member)
 
@@ -194,14 +222,15 @@ static inline void hlist_del_init(struct hlist_node *n)
          pos = hlist_entry_safe((pos)->member.next, type, member))
 #endif
 
+
 cache_t *cache_create(int size_bits)
 {
     cache_t *cache = malloc(sizeof(cache_t));
     if (!cache)
         return NULL;
     cache_size_bits = size_bits;
     cache_size = 1 << size_bits;
-
+#if RV32_HAS(ARC)
     for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
         cache->lists[i] = malloc(sizeof(struct list_head));
         INIT_LIST_HEAD(cache->lists[i]);
@@ -224,12 +253,41 @@ cache_t *cache_create(int size_bits)
     for (uint32_t i = 0; i < cache_size; i++) {
         INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
     }
+    cache->lru_capacity = cache_size / 2;
+    cache_mp =
+        mpool_create(cache_size * 2 * sizeof(arc_entry_t), sizeof(arc_entry_t));
+#else
+    for (int i = 0; i < THRESHOLD; i++) {
+        cache->lists[i] = malloc(sizeof(struct list_head));
+        INIT_LIST_HEAD(cache->lists[i]);
+    }
 
+    cache->map = malloc(sizeof(hashtable_t));
+    if (!cache->map) {
+        free(cache->lists);
+        free(cache);
+        return NULL;
+    }
+    cache->map->ht_list_head = malloc(cache_size * sizeof(struct hlist_head));
+    if (!cache->map->ht_list_head) {
+        free(cache->map);
+        free(cache->lists);
+        free(cache);
+        return NULL;
+    }
+    for (uint32_t i = 0; i < cache_size; i++) {
+        INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
+    }
+    cache->list_size = 0;
+    cache_mp =
+        mpool_create(cache_size * sizeof(lfu_entry_t), sizeof(lfu_entry_t));
+#endif
     cache->capacity = cache_size;
-    cache->lru_capacity = cache_size / 2;
    return cache;
 }
 
+
+#if RV32_HAS(ARC)
 /* Rules of ARC
  * 1. size of LRU_list + size of LFU_list <= c
  * 2. size of LRU_list + size of LRU_ghost_list <= c
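Both branches of `cache_create` now draw their entries from a fixed-size memory pool: the ARC pool is sized for `2 * cache_size` entries (resident plus ghost entries), the LFU pool for `cache_size` entries, which bounds each cache's memory usage up front. The prototypes below are inferred from the call sites in this diff, not copied from the header; `src/mpool.h` has the authoritative declarations:

```c
/* Inferred from this commit's call sites: a pool of fixed-size chunks
 * carved out of one upfront allocation. */
struct mpool *mpool_create(size_t pool_size, size_t chunk_size);
void *mpool_alloc(struct mpool *mp);          /* hand out one chunk    */
void mpool_free(struct mpool *mp, void *ptr); /* return one chunk      */
void mpool_destory(struct mpool *mp);         /* name as spelled in the code */
```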
@@ -273,12 +331,14 @@ static inline void move_to_mru(cache_t *cache,
     list_del_init(&entry->list);
     list_add(&entry->list, cache->lists[type]);
 }
+#endif
 
 void *cache_get(cache_t *cache, uint32_t key)
 {
     if (!cache->capacity || hlist_empty(&cache->map->ht_list_head[HASH(key)]))
         return NULL;
 
+#if RV32_HAS(ARC)
     arc_entry_t *entry = NULL;
 #ifdef __HAVE_TYPEOF
     hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list)
@@ -323,13 +383,35 @@ void *cache_get(cache_t *cache, uint32_t key)
     }
 
     CACHE_ASSERT(cache);
+#else
+    lfu_entry_t *entry = NULL;
+#ifdef __HAVE_TYPEOF
+    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list)
+#else
+    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list,
+                          lfu_entry_t)
+#endif
+    {
+        if (entry->key == key)
+            break;
+    }
+    if (!entry || entry->key != key)
+        return NULL;
+
+    /* We would translate the block with a frequency of more than THRESHOLD */
+    if (entry->frequency < THRESHOLD) {
+        list_del_init(&entry->list);
+        list_add(&entry->list, cache->lists[entry->frequency++]);
+    }
+#endif
     /* return NULL if cache miss */
     return entry->value;
 }
 
 void *cache_put(cache_t *cache, uint32_t key, void *value)
 {
     void *delete_value = NULL;
+#if RV32_HAS(ARC)
     assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <=
            cache->capacity);
     /* Before adding a new element to the cache, we should check the status
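The promotion rule in the hunk above keeps an entry in `lists[frequency - 1]`: `cache_put` inserts it into `lists[0]` and bumps `frequency` to 1, and each hit moves it up one list until `frequency` reaches `THRESHOLD`. A standalone sketch of just that index arithmetic:

```c
#include <stdio.h>

#define THRESHOLD 1000

/* Traces the list index an entry occupies after insertion and a few
 * hits, mirroring the post-increments in cache_put()/cache_get(). */
int main(void)
{
    unsigned frequency = 0;
    unsigned idx = frequency++;          /* cache_put: lands in lists[0] */
    printf("after put:   lists[%u], frequency=%u\n", idx, frequency);

    for (int hit = 1; hit <= 3; hit++) {
        if (frequency < THRESHOLD)
            idx = frequency++;           /* cache_get hit: move up one list */
        printf("after hit %d: lists[%u], frequency=%u\n", hit, idx, frequency);
    }
    return 0;
}
```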
@@ -343,7 +425,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
             list_del_init(&delete_target->list);
             hlist_del_init(&delete_target->ht_list);
             delete_value = delete_target->value;
-            free(delete_target);
+            mpool_free(cache_mp, delete_target);
             cache->list_size[LRU_ghost_list]--;
             if (cache->list_size[LRU_list] &&
                 cache->list_size[LRU_list] >= cache->lru_capacity)

@@ -357,7 +439,7 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
             list_del_init(&delete_target->list);
             hlist_del_init(&delete_target->ht_list);
             delete_value = delete_target->value;
-            free(delete_target);
+            mpool_free(cache_mp, delete_target);
             cache->list_size[LRU_list]--;
         }
     } else {

@@ -372,12 +454,12 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
             list_del_init(&delete_target->list);
             hlist_del_init(&delete_target->ht_list);
             delete_value = delete_target->value;
-            free(delete_target);
+            mpool_free(cache_mp, delete_target);
             cache->list_size[LFU_ghost_list]--;
         }
         REPLACE_LIST(>, >=)
     }
-    arc_entry_t *new_entry = malloc(sizeof(arc_entry_t));
+    arc_entry_t *new_entry = mpool_alloc(cache_mp);
     new_entry->key = key;
     new_entry->value = value;
     /* check if all cache become LFU */
@@ -393,21 +475,63 @@ void *cache_put(cache_t *cache, uint32_t key, void *value)
     hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
 
     CACHE_ASSERT(cache);
+#else
+    assert(cache->list_size <= cache->capacity);
+    /* Before adding a new element to the cache, we should check the status
+     * of the cache.
+     */
+    if (cache->list_size == cache->capacity) {
+        for (int i = 0; i < THRESHOLD; i++) {
+            if (!list_empty(cache->lists[i])) {
+                lfu_entry_t *delete_target =
+                    list_last_entry(cache->lists[i], lfu_entry_t, list);
+                list_del_init(&delete_target->list);
+                hlist_del_init(&delete_target->ht_list);
+                delete_value = delete_target->value;
+                cache->list_size--;
+                mpool_free(cache_mp, delete_target);
+                break;
+            }
+        }
+    }
+    lfu_entry_t *new_entry = mpool_alloc(cache_mp);
+    new_entry->key = key;
+    new_entry->value = value;
+    new_entry->frequency = 0;
+    list_add(&new_entry->list, cache->lists[new_entry->frequency++]);
+    cache->list_size++;
+    hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
+    assert(cache->list_size <= cache->capacity);
+#endif
     return delete_value;
 }
 
 void cache_free(cache_t *cache, void (*callback)(void *))
 {
+#if RV32_HAS(ARC)
     for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
         arc_entry_t *entry, *safe;
 #ifdef __HAVE_TYPEOF
         list_for_each_entry_safe (entry, safe, cache->lists[i], list)
 #else
         list_for_each_entry_safe (entry, safe, cache->lists[i], list,
                                   arc_entry_t)
+#endif
+#else
+    for (int i = 0; i < THRESHOLD; i++) {
+        if (list_empty(cache->lists[i]))
+            continue;
+        lfu_entry_t *entry, *safe;
+#ifdef __HAVE_TYPEOF
+        list_for_each_entry_safe (entry, safe, cache->lists[i], list)
+#else
+        list_for_each_entry_safe (entry, safe, cache->lists[i], list,
+                                  lfu_entry_t)
+#endif
 #endif
         callback(entry->value);
     }
+    mpool_destory(cache_mp);
     free(cache->map->ht_list_head);
     free(cache->map);
     free(cache);
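Taken together, the API exercised by the cache unit tests looks roughly like this. The sketch below is an assumption about a typical caller, not the actual test harness (which prints the `NEW CACHE`/`FREE CACHE` lines seen in the `.expect` file below); the callback name is hypothetical:

```c
#include <assert.h>
#include <stdint.h>
#include "cache.h"

/* Hypothetical free callback; cache_free() invokes it on every stored value. */
static void noop_free(void *value) { (void) value; }

int main(void)
{
    cache_t *cache = cache_create(10);             /* 2^10 hash buckets */
    assert(cache_get(cache, 42) == NULL);          /* miss returns NULL */
    cache_put(cache, 42, (void *) (uintptr_t) 3);
    assert((uintptr_t) cache_get(cache, 42) == 3); /* hit returns the value */
    cache_free(cache, noop_free);
    return 0;
}
```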

src/feature.h

Lines changed: 5 additions & 0 deletions
@@ -48,6 +48,11 @@
 #define RV32_FEATURE_GDBSTUB 1
 #endif
 
+/* Import adaptive replacement cache to manage blocks */
+#ifndef RV32_FEATURE_ARC
+#define RV32_FEATURE_ARC 0
+#endif
+
 /* Feature test macro */
 #define RV32_HAS(x) RV32_FEATURE_##x
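`$(call set-feature, ARC)` in the Makefile presumably translates `ENABLE_ARC` into a `-D RV32_FEATURE_ARC=<0|1>` compiler flag (an assumption about that build helper), so this `#ifndef` only supplies the fallback default of 0; `src/cache.c` then picks the ARC or LFU implementation at compile time via `#if RV32_HAS(ARC)`.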

tests/cache/arc/cache-get.expect

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+NEW CACHE
+NULL
+NULL
+3
+FREE CACHE
File renamed without changes.
