Skip to content

Commit 17dd9f8

Browse files
Seth Jennings authored and gregkh committed
staging: zcache: crypto API support
This patch allow zcache to use the crypto API for page compression. It replaces the direct LZO compress/decompress calls with calls into the crypto compression API. The compressor to be used is specified in the kernel boot line with the zcache parameter like: zcache=lzo or zcache=deflate. If the specified compressor can't be loaded, zcache uses lzo as the default compressor. Signed-off-by: Seth Jennings <[email protected]> Acked-by: Dan Magenheimer <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent af9584b commit 17dd9f8

File tree

2 files changed

+126
-31
lines changed

2 files changed

+126
-31
lines changed

drivers/staging/zcache/Kconfig

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,12 @@
11
config ZCACHE
22
tristate "Dynamic compression of swap pages and clean pagecache pages"
3-
depends on CLEANCACHE || FRONTSWAP
3+
depends on (CLEANCACHE || FRONTSWAP) && CRYPTO
44
select XVMALLOC
5-
select LZO_COMPRESS
6-
select LZO_DECOMPRESS
5+
select CRYPTO_LZO
76
default n
87
help
98
Zcache doubles RAM efficiency while providing a significant
10-
performance boosts on many workloads. Zcache uses lzo1x
9+
performance boosts on many workloads. Zcache uses
1110
compression and an in-kernel implementation of transcendent
1211
memory to store clean page cache pages and swap in RAM,
1312
providing a noticeable reduction in disk I/O.

drivers/staging/zcache/zcache-main.c

Lines changed: 123 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,8 @@
66
*
77
* Zcache provides an in-kernel "host implementation" for transcendent memory
88
* and, thus indirectly, for cleancache and frontswap. Zcache includes two
9-
* page-accessible memory [1] interfaces, both utilizing lzo1x compression:
9+
* page-accessible memory [1] interfaces, both utilizing the crypto compression
10+
* API:
1011
* 1) "compression buddies" ("zbud") is used for ephemeral pages
1112
* 2) xvmalloc is used for persistent pages.
1213
* Xvmalloc (based on the TLSF allocator) has very low fragmentation
@@ -23,12 +24,13 @@
2324
#include <linux/cpu.h>
2425
#include <linux/highmem.h>
2526
#include <linux/list.h>
26-
#include <linux/lzo.h>
2727
#include <linux/slab.h>
2828
#include <linux/spinlock.h>
2929
#include <linux/types.h>
3030
#include <linux/atomic.h>
3131
#include <linux/math64.h>
32+
#include <linux/crypto.h>
33+
#include <linux/string.h>
3234
#include "tmem.h"
3335

3436
#include "../zram/xvmalloc.h" /* if built in drivers/staging */
@@ -81,6 +83,38 @@ static inline bool is_local_client(struct zcache_client *cli)
8183
return cli == &zcache_host;
8284
}
8385

86+
/* crypto API for zcache */
87+
#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
88+
static char zcache_comp_name[ZCACHE_COMP_NAME_SZ];
89+
static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms;
90+
91+
enum comp_op {
92+
ZCACHE_COMPOP_COMPRESS,
93+
ZCACHE_COMPOP_DECOMPRESS
94+
};
95+
96+
static inline int zcache_comp_op(enum comp_op op,
97+
const u8 *src, unsigned int slen,
98+
u8 *dst, unsigned int *dlen)
99+
{
100+
struct crypto_comp *tfm;
101+
int ret;
102+
103+
BUG_ON(!zcache_comp_pcpu_tfms);
104+
tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
105+
BUG_ON(!tfm);
106+
switch (op) {
107+
case ZCACHE_COMPOP_COMPRESS:
108+
ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
109+
break;
110+
case ZCACHE_COMPOP_DECOMPRESS:
111+
ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
112+
break;
113+
}
114+
put_cpu();
115+
return ret;
116+
}
117+
84118
/**********
85119
* Compression buddies ("zbud") provides for packing two (or, possibly
86120
* in the future, more) compressed ephemeral pages into a single "raw"
@@ -408,7 +442,7 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
408442
{
409443
struct zbud_page *zbpg;
410444
unsigned budnum = zbud_budnum(zh);
411-
size_t out_len = PAGE_SIZE;
445+
unsigned int out_len = PAGE_SIZE;
412446
char *to_va, *from_va;
413447
unsigned size;
414448
int ret = 0;
@@ -425,8 +459,9 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
425459
to_va = kmap_atomic(page, KM_USER0);
426460
size = zh->size;
427461
from_va = zbud_data(zh, size);
428-
ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
429-
BUG_ON(ret != LZO_E_OK);
462+
ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
463+
to_va, &out_len);
464+
BUG_ON(ret);
430465
BUG_ON(out_len != PAGE_SIZE);
431466
kunmap_atomic(to_va, KM_USER0);
432467
out:
@@ -624,7 +659,7 @@ static int zbud_show_cumul_chunk_counts(char *buf)
624659

625660
/**********
626661
* This "zv" PAM implementation combines the TLSF-based xvMalloc
627-
* with lzo1x compression to maximize the amount of data that can
662+
* with the crypto compression API to maximize the amount of data that can
628663
* be packed into a physical page.
629664
*
630665
* Zv represents a PAM page with the index and object (plus a "size" value
@@ -711,7 +746,7 @@ static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
711746

712747
static void zv_decompress(struct page *page, struct zv_hdr *zv)
713748
{
714-
size_t clen = PAGE_SIZE;
749+
unsigned int clen = PAGE_SIZE;
715750
char *to_va;
716751
unsigned size;
717752
int ret;
@@ -720,10 +755,10 @@ static void zv_decompress(struct page *page, struct zv_hdr *zv)
720755
size = xv_get_object_size(zv) - sizeof(*zv);
721756
BUG_ON(size == 0);
722757
to_va = kmap_atomic(page, KM_USER0);
723-
ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
724-
size, to_va, &clen);
758+
ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
759+
size, to_va, &clen);
725760
kunmap_atomic(to_va, KM_USER0);
726-
BUG_ON(ret != LZO_E_OK);
761+
BUG_ON(ret);
727762
BUG_ON(clen != PAGE_SIZE);
728763
}
729764

@@ -1286,55 +1321,73 @@ static struct tmem_pamops zcache_pamops = {
12861321
* zcache compression/decompression and related per-cpu stuff
12871322
*/
12881323

1289-
#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
1290-
#define LZO_DSTMEM_PAGE_ORDER 1
1291-
static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
12921324
static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
1325+
#define ZCACHE_DSTMEM_ORDER 1
12931326

12941327
static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
12951328
{
12961329
int ret = 0;
12971330
unsigned char *dmem = __get_cpu_var(zcache_dstmem);
1298-
unsigned char *wmem = __get_cpu_var(zcache_workmem);
12991331
char *from_va;
13001332

13011333
BUG_ON(!irqs_disabled());
1302-
if (unlikely(dmem == NULL || wmem == NULL))
1303-
goto out; /* no buffer, so can't compress */
1334+
if (unlikely(dmem == NULL))
1335+
goto out; /* no buffer or no compressor so can't compress */
1336+
*out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
13041337
from_va = kmap_atomic(from, KM_USER0);
13051338
mb();
1306-
ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
1307-
BUG_ON(ret != LZO_E_OK);
1339+
ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
1340+
(unsigned int *)out_len);
1341+
BUG_ON(ret);
13081342
*out_va = dmem;
13091343
kunmap_atomic(from_va, KM_USER0);
13101344
ret = 1;
13111345
out:
13121346
return ret;
13131347
}
13141348

1349+
static int zcache_comp_cpu_up(int cpu)
1350+
{
1351+
struct crypto_comp *tfm;
1352+
1353+
tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
1354+
if (IS_ERR(tfm))
1355+
return NOTIFY_BAD;
1356+
*per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
1357+
return NOTIFY_OK;
1358+
}
1359+
1360+
static void zcache_comp_cpu_down(int cpu)
1361+
{
1362+
struct crypto_comp *tfm;
1363+
1364+
tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
1365+
crypto_free_comp(tfm);
1366+
*per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
1367+
}
13151368

13161369
static int zcache_cpu_notifier(struct notifier_block *nb,
13171370
unsigned long action, void *pcpu)
13181371
{
1319-
int cpu = (long)pcpu;
1372+
int ret, cpu = (long)pcpu;
13201373
struct zcache_preload *kp;
13211374

13221375
switch (action) {
13231376
case CPU_UP_PREPARE:
1377+
ret = zcache_comp_cpu_up(cpu);
1378+
if (ret != NOTIFY_OK) {
1379+
pr_err("zcache: can't allocate compressor transform\n");
1380+
return ret;
1381+
}
13241382
per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
1325-
GFP_KERNEL | __GFP_REPEAT,
1326-
LZO_DSTMEM_PAGE_ORDER),
1327-
per_cpu(zcache_workmem, cpu) =
1328-
kzalloc(LZO1X_MEM_COMPRESS,
1329-
GFP_KERNEL | __GFP_REPEAT);
1383+
GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
13301384
break;
13311385
case CPU_DEAD:
13321386
case CPU_UP_CANCELED:
1387+
zcache_comp_cpu_down(cpu);
13331388
free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
1334-
LZO_DSTMEM_PAGE_ORDER);
1389+
ZCACHE_DSTMEM_ORDER);
13351390
per_cpu(zcache_dstmem, cpu) = NULL;
1336-
kfree(per_cpu(zcache_workmem, cpu));
1337-
per_cpu(zcache_workmem, cpu) = NULL;
13381391
kp = &per_cpu(zcache_preloads, cpu);
13391392
while (kp->nr) {
13401393
kmem_cache_free(zcache_objnode_cache,
@@ -1919,6 +1972,44 @@ static int __init no_frontswap(char *s)
19191972

19201973
__setup("nofrontswap", no_frontswap);
19211974

1975+
static int __init enable_zcache_compressor(char *s)
1976+
{
1977+
strncpy(zcache_comp_name, s, ZCACHE_COMP_NAME_SZ);
1978+
zcache_enabled = 1;
1979+
return 1;
1980+
}
1981+
__setup("zcache=", enable_zcache_compressor);
1982+
1983+
1984+
static int zcache_comp_init(void)
1985+
{
1986+
int ret = 0;
1987+
1988+
/* check crypto algorithm */
1989+
if (*zcache_comp_name != '\0') {
1990+
ret = crypto_has_comp(zcache_comp_name, 0, 0);
1991+
if (!ret)
1992+
pr_info("zcache: %s not supported\n",
1993+
zcache_comp_name);
1994+
}
1995+
if (!ret)
1996+
strcpy(zcache_comp_name, "lzo");
1997+
ret = crypto_has_comp(zcache_comp_name, 0, 0);
1998+
if (!ret) {
1999+
ret = 1;
2000+
goto out;
2001+
}
2002+
pr_info("zcache: using %s compressor\n", zcache_comp_name);
2003+
2004+
/* alloc percpu transforms */
2005+
ret = 0;
2006+
zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
2007+
if (!zcache_comp_pcpu_tfms)
2008+
ret = 1;
2009+
out:
2010+
return ret;
2011+
}
2012+
19222013
static int __init zcache_init(void)
19232014
{
19242015
int ret = 0;
@@ -1941,6 +2032,11 @@ static int __init zcache_init(void)
19412032
pr_err("zcache: can't register cpu notifier\n");
19422033
goto out;
19432034
}
2035+
ret = zcache_comp_init();
2036+
if (ret) {
2037+
pr_err("zcache: compressor initialization failed\n");
2038+
goto out;
2039+
}
19442040
for_each_online_cpu(cpu) {
19452041
void *pcpu = (void *)(long)cpu;
19462042
zcache_cpu_notifier(&zcache_cpu_notifier_block,

0 commit comments

Comments
 (0)