
Commit 0ee2e08

Author: vvitkovskiy
Message: Some API improvements
Parent commit: c1f0345

File tree: 4 files changed, +279 -105 lines

README.md (+1 -1)

@@ -83,7 +83,7 @@ To be filled
 root, err = insaneJSON.DecodeString(json) // get a root from the pool and place the decoded json into it
 emptyRoot = insaneJSON.Spawn()            // get an empty root from the pool

-insaneJSON.DecodeStringReusing(emptyRoot, anotherJson) // reuse a root to decode another JSON
+emptyRoot.DecodeString(anotherJson)                    // reuse a root to decode another JSON

 insaneJSON.Release(root) // place the roots back to the pool
 insaneJSON.Release(emptyRoot)
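The README snippet above only shows the call sites. A fuller sketch of the reuse pattern with the renamed method, using only calls that appear in this commit; the import path is an assumption and may differ from the actual module path:

package main

import (
    insaneJSON "github.com/vitkovskii/insane-json" // import path assumed
)

func main() {
    // First document: take a root from the pool and decode into it.
    root, err := insaneJSON.DecodeString(`{"first":1}`)
    if err != nil {
        panic(err)
    }

    // Reuse the same root for the next document instead of spawning a new one;
    // the new Root.DecodeString clears the previous content and reuses the root's buffers.
    if err := root.DecodeString(`{"second":2}`); err != nil {
        panic(err)
    }

    // Put the root back into the pool when done.
    insaneJSON.Release(root)
}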

insane.go (+149 -40)
@@ -43,6 +43,8 @@ const (
 type Type int

 var (
+    StartNodePoolSize = 16
+
     decoderPool      = make([]*decoder, 0, 16)
     decoderPoolIndex = -1
     decoderPoolMu    = &sync.Mutex{}
@@ -63,6 +65,7 @@ var (
     ErrExpectedComma = errors.New("expected comma")

     // api errors
+    ErrRootIsNil = errors.New("root is nil")
     ErrNotFound  = errors.New("node isn't found")
     ErrNotObject = errors.New("node isn't an object")
     ErrNotArray  = errors.New("node isn't an array")
@@ -125,6 +128,33 @@ type decoder struct {
     nodes int
 }

+// ReleaseMem sends node pool and internal buffer to GC
+// useful to reduce memory usage after decoding big JSON
+func (r *Root) ReleaseMem() {
+    r.ReleasePoolMem()
+    r.ReleaseBufMem()
+}
+
+// ReleasePoolMem sends node pool to GC
+func (r *Root) ReleasePoolMem() {
+    r.data.decoder.initPool()
+}
+
+// ReleaseBufMem sends internal buffer to GC
+func (r *Root) ReleaseBufMem() {
+    r.data.decoder.json = make([]byte, 0, 0)
+}
+
+// BuffCap returns current capacity of internal buffer
+func (r *Root) BuffCap() int {
+    return cap(r.data.decoder.json)
+}
+
+// PullSize returns current size of node pool
+func (r *Root) PullSize() int {
+    return len(r.data.decoder.nodePool)
+}
+
 // ******************** //
 //      MAIN SHIT       //
 // ******************** //
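The hunk above adds explicit memory controls on Root. A minimal sketch of how they might be used after decoding one very large document, assuming the same insaneJSON import as the sketch after the README diff and that a root can be reused after ReleaseMem (error handling elided):

func decodeBigThenShrink(big, small string) {
    root, _ := insaneJSON.DecodeString(big)

    _ = root.BuffCap()  // capacity of the internal byte buffer after the big decode
    _ = root.PullSize() // current size of the node pool

    // Hand the oversized buffer and node pool back to the GC
    // before reusing the root for smaller documents.
    root.ReleaseMem()

    _ = root.DecodeString(small)
    insaneJSON.Release(root)
}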
@@ -497,18 +527,25 @@ func (d *decoder) decodeHeadless(json string, isPooled bool) (*Root, error) {
     return &d.root, nil
 }

-// EncodeNoAlloc legendary insane encode function
-// allocates new byte buffer on every call
-// use EncodeNoAlloc to reuse already created buffer and gain more performance
-func (n *Node) Encode() []byte {
-    return n.EncodeNoAlloc([]byte{})
+// EncodeToByte legendary insane encode function
+// slow because it allocates new byte buffer on every call
+// use Encode to reuse already created buffer and gain more performance
+func (n *Node) EncodeToByte() []byte {
+    return n.Encode([]byte{})
+}
+
+// EncodeToString legendary insane encode function
+// slow because it allocates new string on every call
+// use Encode to reuse already created buffer and gain more performance
+func (n *Node) EncodeToString() string {
+    return toString(n.Encode([]byte{}))
 }

-// EncodeNoAlloc legendary insane encode function
+// Encode legendary insane encode function
 // uses already created byte buffer to place json data so
 // mem allocations may occur only if buffer isn't long enough
 // use it for performance
-func (n *Node) EncodeNoAlloc(out []byte) []byte {
+func (n *Node) Encode(out []byte) []byte {
     out = out[:0]
     s := 0
     curNode := n
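Since this hunk renames the encoders (Encode becomes EncodeToByte, EncodeNoAlloc becomes Encode, and EncodeToString is new), here is a short usage sketch, assuming the same insaneJSON import as above and that these Node methods are reachable from a *Root as in the fuzz and benchmark code later in this commit:

func encodeVariants(root *insaneJSON.Root) {
    _ = root.EncodeToByte()   // convenient, allocates a fresh []byte per call
    _ = root.EncodeToString() // convenient, allocates a fresh string per call

    // Hot path: keep one buffer and let Encode fill it, growing only if needed.
    buf := make([]byte, 0, 1024)
    for i := 0; i < 3; i++ {
        buf = root.Encode(buf[:0])
    }
}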
@@ -1008,47 +1045,75 @@ func (n *Node) findSelf() int {
 //      MUTATIONS      //
 // ******************** //

-func (n *Node) MutateToJSON(json string) *Node {
-    if n == nil {
+func (n *Node) MergeWith(node *Node) *Node {
+    if n == nil || node == nil {
         return n
     }
-    owner := n.parent
-    if owner == nil {
+    if !n.IsObject() || !node.IsObject() {
         return n
     }

-    root, err := n.data.decoder.decode(json, false)
-    if err != nil {
+    for _, child := range node.data.values {
+        child.unescapeField()
+        childField := child.AsString()
+        x := n.AddField(childField)
+        x.MutateToNode(child.next)
+    }
+
+    return n
+}
+
+// MutateToNode isn't a safe function: if you create a node cycle, encode() may freeze
+func (n *Node) MutateToNode(node *Node) *Node {
+    if n == nil || node == nil {
         return n
     }
-    end := root.data.end
-    n.tryDropLinks()

-    index := n.actualizeIndex()
+    n.tryDropLinks()

-    end.parent = n
-    if index != len(owner.data.values)-1 {
-        end.next = owner.data.values[index+1]
-    } else {
-        end.next = owner.data.end
+    curNext := n.next
+    if n.Type == Object || n.Type == Array {
+        curNext = n.data.end.next
     }

-    n.Type = root.Type
-    n.next = root.next
-    n.value = root.value
-    n.data.end = root.data.end
-    n.data.flags = root.data.flags
-    n.data.values = append(n.data.values[:0], root.data.values...)
-    for _, node := range root.data.values {
-        node.parent = n
-        if root.Type == Object {
-            node.next.parent = n
+    if node.Type == Object || node.Type == Array {
+        node.data.end.next = curNext
+    } else {
+        node.next = curNext
+    }
+
+    n.Type = node.Type
+    n.value = node.value
+    if node.Type == Object || node.Type == Array {
+        n.next = node.next
+        n.data.end = node.data.end
+        n.data.end.parent = n.parent
+        n.data.flags &= ^FlagFieldMap // reset field mapping
+        n.data.values = append(n.data.values[:0], node.data.values...)
+        for _, child := range node.data.values {
+            child.parent = n
+            if node.Type == Object {
+                child.next.parent = n
+            }
         }
     }

     return n
 }

+func (n *Node) MutateToJSON(json string) *Node {
+    if n == nil {
+        return n
+    }
+
+    node, err := n.data.decoder.decode(json, false)
+    if err != nil {
+        return n
+    }
+
+    return n.MutateToNode(node)
+}
+
 func (n *Node) MutateToField(value string) *Node {
     if n.Type != Field {
         return n
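A sketch of the new mutation helpers working together, assuming the same insaneJSON import as above, that Dig returns a *Node (as the benchmark code suggests), and with error handling elided:

func mutateSketch() {
    root, _ := insaneJSON.DecodeString(`{"a":{"x":1},"b":"old"}`)

    // Decode an extra object into the same root's node pool; it comes back as a *Node.
    extra, _ := root.DecodeStringAdditional(`{"y":2}`)

    // Copy extra's fields into the "a" object.
    root.Dig("a").MergeWith(extra)

    // Replace the value of "b" with freshly decoded JSON (goes through MutateToNode).
    root.Dig("b").MutateToJSON(`[1,2,3]`)

    insaneJSON.Release(root)
}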
@@ -1255,6 +1320,10 @@ func (n *Node) unescapeStr() {
 }

 func (n *Node) unescapeField() {
+    if n.Type == Field {
+        return
+    }
+
     value := n.value
     i := strings.LastIndexByte(value, '"')
     n.value = unescapeStr(value[1:i])
@@ -1289,6 +1358,19 @@ func (n *Node) AsString() string {
     }
 }

+func (n *Node) AsBytes() []byte {
+    return toByte(n.AsString())
+}
+
+func (n *StrictNode) AsBytes() ([]byte, error) {
+    s, err := n.AsString()
+    if err != nil {
+        return nil, err
+    }
+
+    return toByte(s), nil
+}
+
 func (n *StrictNode) AsString() (string, error) {
     if n.Type == escapedField {
         panic("insane json really goes outta its mind")
@@ -1501,9 +1583,8 @@ func (n *Node) InStrictMode() *StrictNode {
 // ******************** //

 func (d *decoder) initPool() {
-    l := 1024
-    d.nodePool = make([]*Node, l, l)
-    for i := 0; i < l; i++ {
+    d.nodePool = make([]*Node, StartNodePoolSize, StartNodePoolSize)
+    for i := 0; i < StartNodePoolSize; i++ {
         d.nodePool[i] = &Node{data: &data{decoder: d}}
     }
 }
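With initPool now reading the package-level StartNodePoolSize (declared at the top of the file) instead of a hard-coded 1024, the initial pool size becomes tunable. A hedged sketch, assuming the same insaneJSON import as above and that the new value only affects pools initialized afterwards (freshly created decoders, or after ReleasePoolMem):

func spawnWithBiggerPool() *insaneJSON.Root {
    // Assumption: takes effect for pools initialized after this assignment.
    insaneJSON.StartNodePoolSize = 256

    return insaneJSON.Spawn()
}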
@@ -1557,18 +1638,46 @@ func DecodeString(json string) (*Root, error) {
     return Spawn().data.decoder.decodeHeadless(json, true)
 }

-func DecodeBytesReusing(root *Root, jsonBytes []byte) error {
-    _, err := root.data.decoder.decodeHeadless(toString(jsonBytes), false)
+// DecodeBytes clears the root and decodes new JSON,
+// useful for reusing a root to decode multiple times and reduce allocations
+func (r *Root) DecodeBytes(jsonBytes []byte) error {
+    if r == nil {
+        return ErrRootIsNil
+    }
+    _, err := r.data.decoder.decodeHeadless(toString(jsonBytes), false)

     return err
 }

-func DecodeStringReusing(root *Root, json string) error {
-    _, err := root.data.decoder.decodeHeadless(json, false)
+// DecodeString clears the root and decodes new JSON,
+// useful for reusing a root to decode multiple times and reduce allocations
+func (r *Root) DecodeString(json string) error {
+    if r == nil {
+        return ErrRootIsNil
+    }
+    _, err := r.data.decoder.decodeHeadless(json, false)

     return err
 }

+// DecodeBytesAdditional doesn't clear the root; it uses the root's node pool to decode JSON
+func (r *Root) DecodeBytesAdditional(jsonBytes []byte) (*Node, error) {
+    if r == nil {
+        return nil, ErrRootIsNil
+    }
+
+    return r.data.decoder.decode(toString(jsonBytes), false)
+}
+
+// DecodeStringAdditional doesn't clear the root; it uses the root's node pool to decode JSON
+func (r *Root) DecodeStringAdditional(json string) (*Node, error) {
+    if r == nil {
+        return nil, ErrRootIsNil
+    }
+
+    return r.data.decoder.decode(json, false)
+}
+
 func Release(root *Root) {
     if root == nil {
         return
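To summarize the Root-based decode API introduced in this hunk, a sketch of reusing one root across many documents and decoding an extra document side by side, assuming the same insaneJSON import as above:

func reuseRoot(docs []string) {
    root := insaneJSON.Spawn()

    for _, doc := range docs {
        // DecodeString clears the previous content and reuses the root's buffers.
        if err := root.DecodeString(doc); err != nil {
            continue
        }
        // ... work with root here ...
    }

    // DecodeStringAdditional keeps the current tree and returns the new document as a *Node.
    extra, err := root.DecodeStringAdditional(`{"extra":true}`)
    _, _ = extra, err

    insaneJSON.Release(root)
}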
@@ -1930,7 +2039,7 @@ var out = make([]byte, 0, 0)
 var root = Spawn()

 func Fuzz(data []byte) int {
-    err := DecodeBytesReusing(root, data)
+    err := root.DecodeBytes(data)
     if err != nil {
         return -1
     }
@@ -2009,7 +2118,7 @@ func Fuzz(data []byte) int {
         }
     }

-    root.EncodeNoAlloc(out)
+    root.Encode(out)

     return 1
 }

insane_perf_test.go (+3 -3)
@@ -138,15 +138,15 @@ func BenchmarkFair(b *testing.B) {
             root := Spawn()
             for i := 0; i < b.N; i++ {
                 for _, json := range jsons {
-                    _ = DecodeBytesReusing(root, json)
+                    _ = root.DecodeBytes(json)
                     for j := 0; j < reqCount; j++ {
                         for _, f := range fields {
                             for _, ff := range f {
                                 root.Dig(ff...)
                             }
                         }
                     }
-                    s = root.EncodeNoAlloc(s[:0])
+                    s = root.Encode(s[:0])
                 }
             }
             Release(root)
@@ -157,7 +157,7 @@ func BenchmarkFair(b *testing.B) {
         fn: func(b *testing.B, jsons [][]byte, fields [][][]string, reqCount int) {
             root := Spawn()
             for _, json := range jsons {
-                _ = DecodeBytesReusing(root, json)
+                _ = root.DecodeBytes(json)
                 for j := 0; j < reqCount; j++ {
                     for _, f := range fields {
                         for _, ff := range f {
