43 changes: 6 additions & 37 deletions api.go
@@ -21,29 +21,6 @@ type Issue struct {
DuplicatePos token.Position
}

// IssuePool provides a pool of Issue slices to reduce allocations
var IssuePool = sync.Pool{
New: func() interface{} {
return make([]Issue, 0, 100)
},
}

// GetIssueBuffer retrieves an Issue slice from the pool
func GetIssueBuffer() []Issue {
return IssuePool.Get().([]Issue)[:0] // Reset length but keep capacity
}

// PutIssueBuffer returns an Issue slice to the pool
func PutIssueBuffer(issues []Issue) {
// Make sure to clear references before returning to pool
for i := range issues {
issues[i].MatchingConst = ""
issues[i].Str = ""
}
// Return the slice to the pool
IssuePool.Put(make([]Issue, 0, cap(issues))) //nolint:staticcheck
}

// Config contains all configuration options for the goconst analyzer.
type Config struct {
// IgnoreStrings is a list of regular expressions to filter strings
@@ -137,13 +114,8 @@ func RunWithConfig(files []*ast.File, fset *token.FileSet, typeInfo *types.Info,
expectedIssues = 1000 // Cap at reasonable maximum
}

// Get issue buffer from pool instead of allocating
issueBuffer := GetIssueBuffer()
if cap(issueBuffer) < expectedIssues {
// Only allocate new buffer if existing one is too small
PutIssueBuffer(issueBuffer)
issueBuffer = make([]Issue, 0, expectedIssues)
}
// Allocate a new buffer
issueBuffer := make([]Issue, 0, expectedIssues)

// Process files concurrently
var wg sync.WaitGroup
@@ -195,8 +167,8 @@ func RunWithConfig(files []*ast.File, fset *token.FileSet, typeInfo *types.Info,
p.stringMutex.RLock()
p.stringCountMutex.RLock()

// Get a string buffer from pool instead of allocating
stringKeys := GetStringBuffer()
// Create a slice to hold the string keys
stringKeys := make([]string, 0, len(p.strs))

// Create an array of strings to sort for stable output
for str := range p.strs {
Expand Down Expand Up @@ -243,8 +215,8 @@ func RunWithConfig(files []*ast.File, fset *token.FileSet, typeInfo *types.Info,
// process duplicate constants
p.constMutex.RLock()

// reuse string buffer for const keys
stringKeys = stringKeys[:0]
// Create a new slice for const keys
stringKeys = make([]string, 0, len(p.consts))

// Create an array of strings and sort for stable output
for str := range p.consts {
@@ -271,9 +243,6 @@ func RunWithConfig(files []*ast.File, fset *token.FileSet, typeInfo *types.Info,

p.constMutex.RUnlock()

// Return string buffer to pool
PutStringBuffer(stringKeys)

// Don't return the buffer to pool as the caller now owns it
return issueBuffer, nil
}
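For context, the api.go change above swaps the pooled issue buffer for a plain allocation. A minimal, self-contained sketch of the two approaches side by side (the trimmed-down Issue type, getPooled, and allocate are illustrative names, not code from this PR):

package main

import (
	"fmt"
	"sync"
)

// Issue is a trimmed-down stand-in for the analyzer's Issue type.
type Issue struct {
	Str           string
	MatchingConst string
}

// issuePool mirrors the removed IssuePool: a sync.Pool of Issue slices.
var issuePool = sync.Pool{
	New: func() interface{} { return make([]Issue, 0, 100) },
}

// getPooled follows the removed GetIssueBuffer: reset length, keep capacity,
// and fall back to a fresh slice when the pooled buffer is too small.
func getPooled(expected int) []Issue {
	buf := issuePool.Get().([]Issue)[:0]
	if cap(buf) < expected {
		buf = make([]Issue, 0, expected)
	}
	return buf
}

// allocate is what the PR does instead: a fresh slice the caller owns outright.
func allocate(expected int) []Issue {
	return make([]Issue, 0, expected)
}

func main() {
	fmt.Println(cap(getPooled(10)), cap(allocate(1000)))
}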
5 changes: 0 additions & 5 deletions api_test.go
@@ -562,8 +562,3 @@ func checker(fset *token.FileSet) (*types.Checker, *types.Info) {
}
return types.NewChecker(cfg, fset, types.NewPackage("", "example"), info), info
}

func Test_IssuePool(t *testing.T) {
PutIssueBuffer([]Issue{})
GetIssueBuffer()
}
46 changes: 16 additions & 30 deletions parser.go
@@ -46,14 +46,6 @@ var ByteBufferPool = sync.Pool{
},
}

// StringBufferPool is a pool for string slices
var StringBufferPool = sync.Pool{
New: func() interface{} {
slice := make([]string, 0, 32)
return &slice
},
}

// ExtendedPosPool is a pool for slices of ExtendedPos
var ExtendedPosPool = sync.Pool{
New: func() interface{} {
@@ -103,17 +95,6 @@ func PutByteBuffer(buf []byte) {
ByteBufferPool.Put(&bufCopy)
}

// GetStringBuffer retrieves a string slice from the pool
func GetStringBuffer() []string {
return (*StringBufferPool.Get().(*[]string))[:0] // Reset length but keep capacity
}

// PutStringBuffer returns a string slice to the pool
func PutStringBuffer(slice []string) {
sliceCopy := make([]string, 0, cap(slice))
StringBufferPool.Put(&sliceCopy)
}

// GetExtendedPosBuffer retrieves an ExtendedPos slice from the pool
func GetExtendedPosBuffer() []ExtendedPos {
return (*ExtendedPosPool.Get().(*[]ExtendedPos))[:0] // Reset length but keep capacity
@@ -466,16 +447,19 @@ func (p *Parser) parseConcurrently(filesChan <-chan string) (*token.FileSet, map

parsedFilesChan := make(chan parsedFile, chanSize)

// Add all workers to the WaitGroup before starting any goroutines
// This prevents a race condition with the goroutine that waits
parserWg.Add(p.maxConcurrency)

// Start a separate goroutine to close the channel after all parsers are done
go func() {
parserWg.Wait()
close(parsedFilesChan)
}()

for i := 0; i < p.maxConcurrency; i++ {
parserWg.Add(1)
go func(id int) {
defer func() {
parserWg.Done()
if id == 0 { // first worker waits and closes the sending channel
parserWg.Wait()
close(parsedFilesChan)
}
}()
go func() {
defer parserWg.Done()

for filePath := range filesChan {
// Parse a single file
@@ -495,7 +479,7 @@ pkgName := f.Name.Name
pkgName := f.Name.Name
parsedFilesChan <- parsedFile{pkgName, f}
}
}(i)
}()
}

// Read all parsed files into packgageFiles map. All packages must be parsed prior to type-checking.
@@ -526,8 +510,10 @@ func (p *Parser) visitConcurrently(fset *token.FileSet, info *types.Info, filesB

parsedFilesChan := make(chan parsedFile, chanSize)

// Add all workers to the WaitGroup before starting any goroutines
visitorWg.Add(p.maxConcurrency)

for i := 0; i < p.maxConcurrency; i++ {
visitorWg.Add(1)
go func() {
defer visitorWg.Done()
for pf := range parsedFilesChan {
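A closing note on the concurrency change in parser.go: previously each worker called parserWg.Add(1) just before its goroutine started, and worker 0 doubled as the goroutine that waited and closed the results channel, so a fast first worker could, in principle, reach Wait before the remaining Add calls had run and close the channel while other workers still wanted to send. The new shape registers all workers with a single Add up front and hands the close to a dedicated goroutine. A minimal sketch of that pattern, with placeholder work standing in for file parsing:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 4
	jobs := make(chan int)
	results := make(chan int, workers)

	var wg sync.WaitGroup
	// Register every worker before any goroutine starts, so the waiter
	// can never observe the WaitGroup hitting zero prematurely.
	wg.Add(workers)

	// A dedicated goroutine closes the results channel once all workers finish.
	go func() {
		wg.Wait()
		close(results)
	}()

	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			for j := range jobs {
				results <- j * j // placeholder for parsing a file
			}
		}()
	}

	// Feed the jobs and close the input channel so the workers can exit.
	go func() {
		for i := 1; i <= 8; i++ {
			jobs <- i
		}
		close(jobs)
	}()

	sum := 0
	for r := range results {
		sum += r
	}
	fmt.Println("sum of squares:", sum)
}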