bpf: Add Batch Methods
As of kernel v5.6, batch methods allow for fast lookup,
update, and deletion of bpf map entries, avoiding the
syscall overhead of repeatedly calling the corresponding
single-element methods.

The batch methods are as follows:
 * BatchUpdate
 * BatchLookup
 * BatchLookupAndDelete
 * BatchDelete

Only the "array" and "hash" types currently support
batch operations, and the "array" type does not support
batch deletion.

Tests are in place to cover every scenario, and helper
functions have been written to catch errors that the kernel
would otherwise return and to surface them in a way that is
helpful to users of the library.
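
For illustration, a minimal usage sketch of the new methods (not part of
this change; the whole program is hypothetical and the map layout simply
mirrors the tests below):

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// A hash map with 4-byte keys and values, as in the tests below.
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 10,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	keys := []uint32{0, 1}
	values := []uint32{42, 4242}

	// Write both entries with a single syscall.
	if _, err := m.BatchUpdate(keys, values, nil); err != nil {
		log.Fatal(err)
	}

	// Read them back. A nil startKey starts at the beginning of the map;
	// nextKey receives the position to pass as startKey on the next call.
	var nextKey uint32
	outKeys := make([]uint32, 2)
	outValues := make([]uint32, 2)
	n, err := m.BatchLookup(nil, &nextKey, outKeys, outValues, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("looked up %d entries: %v -> %v\n", n, outKeys[:n], outValues[:n])
}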

Signed-off-by: Nate Sweet <[email protected]>
nathanjsweet committed Feb 4, 2021
1 parent e21b849 commit e979c3c
Showing 5 changed files with 406 additions and 3 deletions.
129 changes: 129 additions & 0 deletions map.go
@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"path/filepath"
"reflect"
"strings"

"github.com/cilium/ebpf/internal"
@@ -17,6 +18,7 @@ var (
ErrKeyNotExist = errors.New("key does not exist")
ErrKeyExist = errors.New("key already exists")
ErrIterationAborted = errors.New("iteration aborted")
ErrBatchOpNotSup = errors.New("batch operations not supported for this map type")
)

// MapOptions control loading a map into the kernel.
@@ -579,6 +581,133 @@ func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error {
return nil
}

// BatchLookup looks up many elements in a map at once.
// startKey is the first element to start from; a nil startKey
// begins the lookup at the start of the map. nextKey receives
// the position to pass as startKey in a subsequent call.
func (m *Map) BatchLookup(startKey, nextKey, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
return m.batchLookup(false, startKey, nextKey, keysOut, valuesOut, opts)
}

// BatchLookupAndDelete looks up many elements in a map at
// once, starting from the element indicated by startKey, and
// then deletes all of the looked-up elements.
func (m *Map) BatchLookupAndDelete(startKey, nextKey, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
return m.batchLookup(true, startKey, nextKey, keysOut, valuesOut, opts)
}

func (m *Map) batchLookup(del bool, startKey, nextKey, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
if err := haveBatchAPI(); err != nil {
return 0, err
}
if !m.typ.canBatch() || (del && !m.typ.canBatchDelete()) {
return 0, ErrBatchOpNotSup
}
keysValue := reflect.ValueOf(keysOut)
if keysValue.Kind() != reflect.Slice {
return 0, fmt.Errorf("keys must be a slice")
}
valuesValue := reflect.ValueOf(valuesOut)
if valuesValue.Kind() != reflect.Slice {
return 0, fmt.Errorf("valuesOut must be a slice")
}
count := keysValue.Len()
if count != valuesValue.Len() {
return 0, fmt.Errorf("keysOut and valuesOut must be the same length")
}
keyBuf := make([]byte, count*int(m.keySize))
keyPtr := internal.NewSlicePointer(keyBuf)
valueBuf := make([]byte, count*int(m.valueSize))
valuePtr := internal.NewSlicePointer(valueBuf)

var (
startPtr internal.Pointer
err error
)
if startKey != nil {
startPtr, err = marshalPtr(startKey, int(m.keySize))
if err != nil {
return 0, err
}
}

nextPtr, nextBuf := makeBuffer(nextKey, int(m.keySize))
var ct uint32
if del {
ct, err = bpfMapBatch(internal.BPF_MAP_LOOKUP_AND_DELETE_BATCH, m.fd, startPtr, nextPtr, keyPtr, valuePtr, uint32(count), opts)
} else {
ct, err = bpfMapBatch(internal.BPF_MAP_LOOKUP_BATCH, m.fd, startPtr, nextPtr, keyPtr, valuePtr, uint32(count), opts)
}
if err != nil && !errors.Is(err, ErrKeyNotExist) {
return 0, err
}

err = m.unmarshalKey(nextKey, nextBuf)
if err != nil {
return 0, err
}
err = unmarshalBytes(keysOut, keyBuf)
if err != nil {
return 0, err
}
return int(ct), unmarshalBytes(valuesOut, valueBuf)
}

// BatchUpdate updates the map with multiple keys and values
// simultaneously.
func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, error) {
if err := haveBatchAPI(); err != nil {
return 0, err
}
if !m.typ.canBatch() {
return 0, ErrBatchOpNotSup
}
keysValue := reflect.ValueOf(keys)
if keysValue.Kind() != reflect.Slice {
return 0, fmt.Errorf("keys must be a slice")
}
valuesValue := reflect.ValueOf(values)
if valuesValue.Kind() != reflect.Slice {
return 0, fmt.Errorf("values must be a slice")
}
count := keysValue.Len()
if count != valuesValue.Len() {
return 0, fmt.Errorf("keys and values must be the same length")
}
keyPtr, err := marshalPtr(keys, count*int(m.keySize))
if err != nil {
return 0, err
}
valuePtr, err := marshalPtr(values, count*int(m.valueSize))
if err != nil {
return 0, err
}
var nilPtr internal.Pointer
ct, err := bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, valuePtr, uint32(count), opts)
return int(ct), err
}

// BatchDelete batch deletes entries in the map by their keys.
func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) {
if err := haveBatchAPI(); err != nil {
return 0, err
}
if !m.typ.canBatchDelete() {
return 0, ErrBatchOpNotSup
}
keysValue := reflect.ValueOf(keys)
if keysValue.Kind() != reflect.Slice {
return 0, fmt.Errorf("keys must be a slice")
}
count := keysValue.Len()
keyPtr, err := marshalPtr(keys, count*int(m.keySize))
if err != nil {
return 0, fmt.Errorf("cannot marshal keys: %v", err)
}
var nilPtr internal.Pointer
ct, err := bpfMapBatch(internal.BPF_MAP_DELETE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, nilPtr, uint32(count), opts)
return int(ct), err
}

// Iterate traverses a map.
//
// It's safe to create multiple iterators at the same time.
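
Because BatchLookup reports its position through nextKey, a caller can drain
an arbitrarily large map in fixed-size chunks. A sketch of that pattern
follows (drainMap and batchSize are illustrative names, not part of this
change; in this version the end of the map is signalled by a lookup that
returns fewer entries than requested):

package example

import "github.com/cilium/ebpf"

// drainMap reads every entry of a uint32->uint32 map in chunks of batchSize.
// A lookup that returns fewer entries than requested means the end of the
// map has been reached.
func drainMap(m *ebpf.Map, batchSize int) (map[uint32]uint32, error) {
	out := make(map[uint32]uint32)
	keys := make([]uint32, batchSize)
	values := make([]uint32, batchSize)

	var cursor uint32
	var startKey interface{} // nil on the first call: start at the beginning
	for {
		n, err := m.BatchLookup(startKey, &cursor, keys, values, nil)
		if err != nil {
			return nil, err
		}
		for i := 0; i < n; i++ {
			out[keys[i]] = values[i]
		}
		if n < batchSize {
			return out, nil
		}
		startKey = &cursor // resume where the previous batch stopped
	}
}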
192 changes: 192 additions & 0 deletions map_test.go
@@ -7,6 +7,7 @@ import (
"math"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"testing"
@@ -68,6 +69,197 @@ func TestMap(t *testing.T) {
}
}

func TestBatchAPIArray(t *testing.T) {
if err := haveBatchAPI(); err != nil {
t.Skipf("batch api not available: %v", err)
}
m, err := NewMap(&MapSpec{
Type: Array,
KeySize: 4,
ValueSize: 4,
MaxEntries: 10,
})
if err != nil {
t.Fatal(err)
}
defer m.Close()

var (
nextKey uint32
keys = []uint32{0, 1}
values = []uint32{42, 4242}
lookupKeys = make([]uint32, 2)
lookupValues = make([]uint32, 2)
deleteKeys = make([]uint32, 2)
deleteValues = make([]uint32, 2)
)

count, err := m.BatchUpdate(keys, values, nil)
if err != nil {
t.Fatalf("BatchUpdate: %v", err)
}
if count != len(keys) {
t.Fatalf("BatchUpdate: expected count, %d, to be %d", count, len(keys))
}

var v uint32
if err := m.Lookup(uint32(0), &v); err != nil {
t.Fatal("Can't lookup 0:", err)
}
if v != 42 {
t.Error("Want value 42, got", v)
}

count, err = m.BatchLookup(nil, &nextKey, lookupKeys, lookupValues, nil)
if err != nil {
t.Fatalf("BatchLookup: %v", err)
}
if count != len(lookupKeys) {
t.Fatalf("BatchLookup: returned %d results, expected %d", count, len(lookupKeys))
}
if nextKey != lookupKeys[1] {
t.Fatalf("BatchLookup: expected nextKey, %d, to be the same as the lastKey returned, %d", nextKey, lookupKeys[1])
}
if !reflect.DeepEqual(keys, lookupKeys) {
t.Errorf("BatchUpdate and BatchLookup keys disagree: %v %v", keys, lookupKeys)
}
if !reflect.DeepEqual(values, lookupValues) {
t.Errorf("BatchUpdate and BatchLookup values disagree: %v %v", values, lookupValues)
}

_, err = m.BatchLookupAndDelete(nil, &nextKey, deleteKeys, deleteValues, nil)
if !errors.Is(err, ErrBatchOpNotSup) {
t.Fatalf("BatchLookUpDelete: expected error %v, but got %v", ErrBatchOpNotSup, err)
}
}

func TestBatchAPIHash(t *testing.T) {
if err := haveBatchAPI(); err != nil {
t.Skipf("batch api not available: %v", err)
}
m, err := NewMap(&MapSpec{
Type: Hash,
KeySize: 4,
ValueSize: 4,
MaxEntries: 10,
})
if err != nil {
t.Fatal(err)
}
defer m.Close()

var (
nextKey uint32
keys = []uint32{0, 1}
values = []uint32{42, 4242}
lookupKeys = make([]uint32, 2)
lookupValues = make([]uint32, 2)
deleteKeys = make([]uint32, 2)
deleteValues = make([]uint32, 2)
)

count, err := m.BatchUpdate(keys, values, nil)
if err != nil {
t.Fatalf("BatchUpdate: %v", err)
}
if count != len(keys) {
t.Fatalf("BatchUpdate: expected count, %d, to be %d", count, len(keys))
}

var v uint32
if err := m.Lookup(uint32(0), &v); err != nil {
t.Fatal("Can't lookup 0:", err)
}
if v != 42 {
t.Error("Want value 42, got", v)
}

count, err = m.BatchLookup(nil, &nextKey, lookupKeys, lookupValues, nil)
if err != nil {
t.Fatalf("BatchLookup: %v", err)
}
if count != len(lookupKeys) {
t.Fatalf("BatchLookup: returned %d results, expected %d", count, len(lookupKeys))
}
sort.Slice(lookupKeys, func(i, j int) bool { return lookupKeys[i] < lookupKeys[j] })
if !reflect.DeepEqual(keys, lookupKeys) {
t.Errorf("BatchUpdate and BatchLookup keys disagree: %v %v", keys, lookupKeys)
}
sort.Slice(lookupValues, func(i, j int) bool { return lookupValues[i] < lookupValues[j] })
if !reflect.DeepEqual(values, lookupValues) {
t.Errorf("BatchUpdate and BatchLookup values disagree: %v %v", values, lookupValues)
}

count, err = m.BatchLookupAndDelete(nil, &nextKey, deleteKeys, deleteValues, nil)
if err != nil {
t.Fatalf("BatchLookupAndDelete: %v", err)
}
if count != len(deleteKeys) {
t.Fatalf("BatchLookupAndDelete: returned %d results, expected %d", count, len(deleteKeys))
}
sort.Slice(deleteKeys, func(i, j int) bool { return deleteKeys[i] < deleteKeys[j] })
if !reflect.DeepEqual(keys, deleteKeys) {
t.Errorf("BatchUpdate and BatchLookupAndDelete keys disagree: %v %v", keys, deleteKeys)
}
sort.Slice(deleteValues, func(i, j int) bool { return deleteValues[i] < deleteValues[j] })
if !reflect.DeepEqual(values, deleteValues) {
t.Errorf("BatchUpdate and BatchLookupAndDelete values disagree: %v %v", values, deleteValues)
}

if err := m.Lookup(uint32(0), &v); !errors.Is(err, ErrKeyNotExist) {
t.Fatalf("Lookup should have failed with error, %v, instead error is %v", ErrKeyNotExist, err)
}
}

func TestBatchAPIMapDelete(t *testing.T) {
if err := haveBatchAPI(); err != nil {
t.Skipf("batch api not available: %v", err)
}
m, err := NewMap(&MapSpec{
Type: Hash,
KeySize: 4,
ValueSize: 4,
MaxEntries: 10,
})
if err != nil {
t.Fatal(err)
}
defer m.Close()

var (
keys = []uint32{0, 1}
values = []uint32{42, 4242}
)

count, err := m.BatchUpdate(keys, values, nil)
if err != nil {
t.Fatalf("BatchUpdate: %v", err)
}
if count != len(keys) {
t.Fatalf("BatchUpdate: expected count, %d, to be %d", count, len(keys))
}

var v uint32
if err := m.Lookup(uint32(0), &v); err != nil {
t.Fatal("Can't lookup 0:", err)
}
if v != 42 {
t.Error("Want value 42, got", v)
}

count, err = m.BatchDelete(keys, nil)
if err != nil {
t.Fatalf("BatchDelete: %v", err)
}
if count != len(keys) {
t.Fatalf("BatchDelete: expected %d deletions got %d", len(keys), count)
}

if err := m.Lookup(uint32(0), &v); !errors.Is(err, ErrKeyNotExist) {
t.Fatalf("Lookup should have failed with error, %v, instead error is %v", ErrKeyNotExist, err)
}
}

func TestMapClose(t *testing.T) {
m := createArray(t)

