forked from lesismal/arpc
-
Notifications
You must be signed in to change notification settings - Fork 0
/
bufferpool.go
177 lines (153 loc) · 3.42 KB
/
bufferpool.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
// Copyright 2020 lesismal. All rights reserved.
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package arpc
import (
"fmt"
"runtime"
"sync"
)
// BufferPool is the default buffer pool instance used by the
// package-level Malloc/Realloc/Free helpers. It may be replaced with
// any implementation of this three-method allocator interface
// (e.g. &NativeAllocator{} to bypass pooling entirely).
var BufferPool interface {
	Malloc(size int) []byte
	Realloc(buf []byte, size int) []byte
	Free(buf []byte)
} = New(64)
// holderSize (4 MB) bounds how far a pooled buffer may be grown in
// place: a buffer within holderSize bytes of the requested size is
// extended by appending from holderBuffer instead of reallocating.
const holderSize = 1024 * 1024 * 4

// holderBuffer is shared scratch storage appended to undersized
// buffers purely to grow their length/capacity; the bytes copied from
// it carry no meaningful data.
var holderBuffer = make([]byte, holderSize)
// MemPool is a sync.Pool-backed []byte allocator. Buffers with
// capacity below minSize are never recycled. When Debug is true, the
// pool records allocation and free call stacks per buffer so that
// double frees can be detected (and panic with both stacks).
type MemPool struct {
	minSize int
	pool    sync.Pool
	// Debug enables double-free detection at the cost of a mutex
	// acquisition and stack capture on every Malloc/Realloc/Free.
	Debug bool
	mux   sync.Mutex
	// allocStacks and freeStacks key on the address of a buffer's
	// first byte, mapping it to the stack of the latest alloc/free.
	allocStacks map[*byte]string
	freeStacks  map[*byte]string
}
// New builds a MemPool whose smallest pooled buffer is minSize bytes.
// A non-positive minSize falls back to the default of 64.
func New(minSize int) *MemPool {
	if minSize <= 0 {
		minSize = 64
	}
	p := &MemPool{
		minSize:     minSize,
		allocStacks: make(map[*byte]string),
		freeStacks:  make(map[*byte]string),
	}
	// Seed the pool with minSize-byte buffers; Malloc grows them on
	// demand when callers ask for more.
	p.pool.New = func() interface{} {
		b := make([]byte, minSize)
		return &b
	}
	return p
}
// Malloc returns a buffer of exactly size bytes, reusing pooled
// storage when possible. Contents are unspecified (recycled buffers
// retain old data).
func (mp *MemPool) Malloc(size int) []byte {
	pbuf := mp.pool.Get().(*[]byte)
	if cap(*pbuf) < size {
		if cap(*pbuf)+holderSize < size {
			// Too large to extend via the holder: recycle the pooled
			// buffer and allocate fresh storage instead.
			mp.pool.Put(pbuf)
			fresh := make([]byte, size)
			pbuf = &fresh
		} else {
			// Grow in place by appending from the shared holder buffer.
			*pbuf = (*pbuf)[:cap(*pbuf)]
			*pbuf = append(*pbuf, holderBuffer[:size-len(*pbuf)]...)
		}
	}
	if mp.Debug {
		mp.saveAllocStack(*pbuf)
	}
	return (*pbuf)[:size]
}
// Realloc returns a buffer of exactly size bytes whose leading
// len(buf) bytes are the contents of buf. If buf is already large
// enough it is resliced in place; otherwise a larger buffer is
// obtained and buf's contents are copied into it.
//
// BUG FIX: the original reallocation paths (both the Malloc fallback
// and the fresh make) discarded buf's contents, unlike the in-place
// append path and NativeAllocator.Realloc, which preserve them. The
// copy now also happens before the old buffer is returned to the
// pool, so a concurrent Malloc cannot clobber it mid-copy.
func (mp *MemPool) Realloc(buf []byte, size int) []byte {
	if size <= cap(buf) {
		return buf[:size]
	}
	if cap(buf) < mp.minSize {
		// buf was never pool-managed; allocate and preserve contents.
		newBuf := mp.Malloc(size)
		copy(newBuf, buf)
		return newBuf
	}
	pbuf := &buf
	if cap(*pbuf)+holderSize >= size {
		// Grow in place via the holder buffer; existing bytes are kept.
		*pbuf = (*pbuf)[:cap(*pbuf)]
		*pbuf = append(*pbuf, holderBuffer[:size-len(*pbuf)]...)
	} else {
		newBuf := make([]byte, size)
		copy(newBuf, buf)
		// Only recycle the old buffer after its contents are copied.
		mp.pool.Put(pbuf)
		pbuf = &newBuf
	}
	if mp.Debug {
		mp.saveAllocStack(*pbuf)
	}
	return (*pbuf)[:size]
}
// Free returns buf to the pool for reuse. Buffers smaller than the
// pool's minimum size are dropped for the GC rather than recycled.
func (mp *MemPool) Free(buf []byte) {
	if cap(buf) >= mp.minSize {
		if mp.Debug {
			mp.saveFreeStack(buf)
		}
		mp.pool.Put(&buf)
	}
}
// saveFreeStack records the call stack of this free, keyed by the
// buffer's first-byte address, and panics with the alloc/free history
// if the same buffer is freed twice without an intervening alloc.
func (mp *MemPool) saveFreeStack(buf []byte) {
	key := &(buf[:1][0])
	mp.mux.Lock()
	defer mp.mux.Unlock()
	if prevFree, exists := mp.freeStacks[key]; exists {
		prevAlloc := mp.allocStacks[key]
		panic(fmt.Errorf("\nbuffer exists: %p\nprevious allocation:\n%v\nprevious free:\n%v\ncurrent free:\n%v", key, prevAlloc, prevFree, getStack()))
	}
	mp.freeStacks[key] = getStack()
	delete(mp.allocStacks, key)
}
// saveAllocStack records the call stack of this allocation, keyed by
// the buffer's first-byte address, clearing any stale free record so
// a future Free of this buffer is not misreported as a double free.
func (mp *MemPool) saveAllocStack(buf []byte) {
	key := &(buf[:1][0])
	mp.mux.Lock()
	mp.allocStacks[key] = getStack()
	delete(mp.freeStacks, key)
	mp.mux.Unlock()
}
// NativeAllocator satisfies the BufferPool interface using plain
// make/GC semantics with no pooling; useful as a drop-in replacement
// when pool reuse is undesirable (e.g. while debugging aliasing).
type NativeAllocator struct{}
// Malloc allocates a fresh zeroed slice of the requested size.
func (a *NativeAllocator) Malloc(size int) []byte {
	buf := make([]byte, size)
	return buf
}
// Realloc returns a slice of exactly size bytes preserving buf's
// contents, reslicing in place when capacity already suffices.
func (a *NativeAllocator) Realloc(buf []byte, size int) []byte {
	if size > cap(buf) {
		grown := make([]byte, size)
		copy(grown, buf)
		return grown
	}
	return buf[:size]
}
// Free is a deliberate no-op: natively allocated slices are reclaimed
// by the garbage collector, so there is nothing to recycle.
func (a *NativeAllocator) Free(buf []byte) {
}
// Malloc allocates a buffer of the given size from the default
// BufferPool.
func Malloc(size int) []byte {
	buf := BufferPool.Malloc(size)
	return buf
}
// Realloc resizes buf to size bytes via the default BufferPool.
func Realloc(buf []byte, size int) []byte {
	resized := BufferPool.Realloc(buf, size)
	return resized
}
// Free releases buf back to the default BufferPool. The caller must
// not use buf after this call, as the pool may hand it to another
// Malloc.
func Free(buf []byte) {
	BufferPool.Free(buf)
}
// getStack formats up to eight stack frames of the caller's caller
// (skipping this function and its immediate caller) for the debug
// alloc/free records.
func getStack() string {
	trace := ""
	for depth := 2; depth < 10; depth++ {
		pc, file, line, ok := runtime.Caller(depth)
		if !ok {
			break
		}
		trace += fmt.Sprintf("\tstack: %d %v [file: %s] [func: %s] [line: %d]\n", depth-1, ok, file, runtime.FuncForPC(pc).Name(), line)
	}
	return trace
}