Using atomic instead of mutex and delete scratch slice #1833
Conversation
Whether the workerChan resources are managed with a lock-protected slice or with an atomically updated linked list, the subsequent operations on each workerChan are I/O-intensive. Given that the operations are LIFO and each element involves significant I/O, I don't think a linked list has any particular advantage.
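For context, here is a minimal sketch of the two approaches being compared: a mutex-protected slice used as a LIFO stack versus a Treiber-style lock-free stack over a linked list. Names and layout are illustrative only, not the actual fasthttp code.

package sketch

import (
	"sync"
	"sync/atomic"
)

// workerChan stands in for the pooled resource; fields are illustrative.
type workerChan struct {
	ch   chan struct{}
	next *workerChan // used only by the lock-free variant
}

// Variant 1: mutex-protected slice used as a LIFO stack.
type sliceStack struct {
	mu    sync.Mutex
	ready []*workerChan
}

func (s *sliceStack) pop() *workerChan {
	s.mu.Lock()
	defer s.mu.Unlock()
	n := len(s.ready)
	if n == 0 {
		return nil
	}
	wc := s.ready[n-1]
	s.ready = s.ready[:n-1]
	return wc
}

func (s *sliceStack) push(wc *workerChan) {
	s.mu.Lock()
	s.ready = append(s.ready, wc)
	s.mu.Unlock()
}

// Variant 2: Treiber-style lock-free stack over a linked list,
// built on atomic.Pointer (Go 1.19+).
type atomicStack struct {
	head atomic.Pointer[workerChan]
}

func (s *atomicStack) pop() *workerChan {
	for {
		top := s.head.Load()
		if top == nil {
			return nil
		}
		if s.head.CompareAndSwap(top, top.next) {
			return top
		}
	}
}

func (s *atomicStack) push(wc *workerChan) {
	for {
		top := s.head.Load()
		wc.next = top
		if s.head.CompareAndSwap(top, wc) {
			return
		}
	}
}

Either way, push/pop is only a handful of pointer operations; the expensive part is the I/O the worker then performs on the connection, which is the point being made above.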
I got a speedup of almost 1.5x in the benchmark tests.
Can you show which benchmarks you used and their results here?
// Benchmarks adapted from the existing workerPool tests; they live in the
// fasthttp package and use io, net, testing, time and
// github.com/valyala/fasthttp/fasthttputil.
func BenchmarkWorkerPoolStartStopSerial(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testWorkerPoolStartStopBENCH()
	}
}

func BenchmarkWorkerPoolStartStopConcurrent(b *testing.B) {
	concurrency := 10
	ch := make(chan struct{}, concurrency)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for j := 0; j < concurrency; j++ {
			go func() {
				testWorkerPoolStartStopBENCH()
				ch <- struct{}{}
			}()
		}
		for j := 0; j < concurrency; j++ {
			select {
			case <-ch:
			case <-time.After(time.Second):
				b.Fatalf("timeout")
			}
		}
	}
}

func BenchmarkWorkerPoolMaxWorkersCountSerial(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testWorkerPoolMaxWorkersCountMultiBENCH(b)
	}
}

func BenchmarkWorkerPoolMaxWorkersCountConcurrent(b *testing.B) {
	concurrency := 4
	ch := make(chan struct{}, concurrency)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for j := 0; j < concurrency; j++ {
			go func() {
				testWorkerPoolMaxWorkersCountMultiBENCH(b)
				ch <- struct{}{}
			}()
		}
		for j := 0; j < concurrency; j++ {
			select {
			case <-ch:
			case <-time.After(time.Second * 2):
				b.Fatalf("timeout")
			}
		}
	}
}

// testWorkerPoolStartStopBENCH repeatedly starts and stops a small pool.
func testWorkerPoolStartStopBENCH() {
	wp := &workerPool{
		WorkerFunc:      func(conn net.Conn) error { return nil },
		MaxWorkersCount: 10,
		Logger:          defaultLogger,
	}
	for i := 0; i < 10; i++ {
		wp.Start()
		wp.Stop()
	}
}

func testWorkerPoolMaxWorkersCountMultiBENCH(b *testing.B) {
	for i := 0; i < 5; i++ {
		testWorkerPoolMaxWorkersCountBENCH(b)
	}
}

// testWorkerPoolMaxWorkersCountBENCH saturates the pool over an in-memory
// listener and checks that additional connections are rejected.
func testWorkerPoolMaxWorkersCountBENCH(b *testing.B) {
	ready := make(chan struct{})
	wp := &workerPool{
		WorkerFunc: func(conn net.Conn) error {
			buf := make([]byte, 100)
			n, err := conn.Read(buf)
			if err != nil {
				b.Errorf("unexpected error: %v", err)
			}
			buf = buf[:n]
			if string(buf) != "foobar" {
				b.Errorf("unexpected data read: %q. Expecting %q", buf, "foobar")
			}
			if _, err = conn.Write([]byte("baz")); err != nil {
				b.Errorf("unexpected error: %v", err)
			}
			<-ready
			return nil
		},
		MaxWorkersCount: 10,
		Logger:          defaultLogger,
		connState:       func(net.Conn, ConnState) {},
	}
	wp.Start()

	ln := fasthttputil.NewInmemoryListener()

	clientCh := make(chan struct{}, wp.MaxWorkersCount)
	for i := 0; i < wp.MaxWorkersCount; i++ {
		go func() {
			conn, err := ln.Dial()
			if err != nil {
				b.Errorf("unexpected error: %v", err)
			}
			if _, err = conn.Write([]byte("foobar")); err != nil {
				b.Errorf("unexpected error: %v", err)
			}
			data, err := io.ReadAll(conn)
			if err != nil {
				b.Errorf("unexpected error: %v", err)
			}
			if string(data) != "baz" {
				b.Errorf("unexpected value read: %q. Expecting %q", data, "baz")
			}
			if err = conn.Close(); err != nil {
				b.Errorf("unexpected error: %v", err)
			}
			clientCh <- struct{}{}
		}()
	}

	for i := 0; i < wp.MaxWorkersCount; i++ {
		conn, err := ln.Accept()
		if err != nil {
			b.Fatalf("unexpected error: %v", err)
		}
		if !wp.Serve(conn) {
			b.Fatalf("worker pool must have enough workers to serve the conn")
		}
	}

	go func() {
		if _, err := ln.Dial(); err != nil {
			b.Errorf("unexpected error: %v", err)
		}
	}()
	conn, err := ln.Accept()
	if err != nil {
		b.Fatalf("unexpected error: %v", err)
	}
	for i := 0; i < 5; i++ {
		if wp.Serve(conn) {
			b.Fatalf("worker pool must be full")
		}
	}
	if err = conn.Close(); err != nil {
		b.Fatalf("unexpected error: %v", err)
	}

	close(ready)
	for i := 0; i < wp.MaxWorkersCount; i++ {
		select {
		case <-clientCh:
		case <-time.After(time.Second):
			b.Fatalf("timeout")
		}
	}
	if err := ln.Close(); err != nil {
		b.Fatalf("unexpected error: %v", err)
	}
	wp.Stop()
}

I used this benchmark, based on the existing tests.
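For reference, the benchmarks above can be run from the fasthttp package directory with something like:

	go test -run=NONE -bench=BenchmarkWorkerPool -benchmem

(-run=NONE skips the regular tests; -benchmem reports allocations per operation.)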
Results using mutex and slice:
Any idea what is causing the extra allocations?
This version exits the loop immediately when the maximum number of workers is reached, rather than creating a new worker once the limit is hit. This reduces the amount of unnecessary work and potential lock contention in sync.Pool.
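To make that concrete, here is a rough sketch of the acquisition path being described. Field and method names (ready, workersCount, maxWorkersCount, workerChanPool) are illustrative, not the exact fasthttp code.

package sketch

import "sync"

// Illustrative pool; the real fasthttp workerPool differs in detail.
type workerChan struct{ ch chan struct{} }

type pool struct {
	lock            sync.Mutex
	ready           []*workerChan
	workersCount    int
	maxWorkersCount int
	workerChanPool  sync.Pool
}

// getCh returns an idle worker, creates a new one while under the limit,
// or returns nil immediately once maxWorkersCount is reached, without
// touching sync.Pool at all in the full case.
func (wp *pool) getCh() *workerChan {
	var ch *workerChan
	createWorker := false

	wp.lock.Lock()
	if n := len(wp.ready); n > 0 {
		ch = wp.ready[n-1] // reuse the most recently parked worker (LIFO)
		wp.ready = wp.ready[:n-1]
	} else if wp.workersCount < wp.maxWorkersCount {
		createWorker = true
		wp.workersCount++
	}
	wp.lock.Unlock()

	if ch != nil {
		return ch
	}
	if !createWorker {
		return nil // pool is full: exit early, caller rejects the connection
	}
	v := wp.workerChanPool.Get()
	if v == nil {
		v = &workerChan{ch: make(chan struct{}, 1)}
	}
	return v.(*workerChan)
}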
I fixed it; the problem was in getCh().
Our linter would like to see some changes.
Co-authored-by: Erik Dubbelboer <[email protected]>
Seems like the code isn't completely thread-safe; 3 tests failed with the race detector.
I think I've ruled out the last of the possible data races.
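For what it's worth, this can be confirmed locally with the race detector (go test -race ./...). The usual shape of the fix in a change like this is to route every cross-goroutine field access through sync/atomic; a tiny, hypothetical example, not the actual fasthttp fields:

package sketch

import "sync/atomic"

// Hypothetical counter shared between Serve and Stop. With a plain int the
// race detector flags the concurrent access; with atomic.Int32 it does not.
type pool struct {
	workersCount atomic.Int32
}

func (p *pool) incWorkers()       { p.workersCount.Add(1) }
func (p *pool) numWorkers() int32 { return p.workersCount.Load() }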
Thanks!