Skip to content

Commit 688505a

Browse files
authored
enhance: cleanup lint check exclusions (milvus-io#40829)
See: milvus-io#40828. Signed-off-by: Ted Xu <[email protected]>
1 parent 901308d commit 688505a

19 files changed

+63
-75
lines changed

.golangci.yml

+1-5
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ linters-settings:
4949
severity: warning
5050
disabled: false
5151
arguments:
52-
- ["ID"] # Allow list
52+
- ["ID", "IDS"] # Allow list
5353
- name: context-as-argument
5454
severity: warning
5555
disabled: false
@@ -182,7 +182,6 @@ issues:
182182
- ifElseChain
183183
- elseif
184184
- commentFormatting
185-
- var-naming
186185
- exitAfterDefer
187186
- captLocal
188187
- singleCaseSwitch
@@ -193,9 +192,6 @@ issues:
193192
- SA9009
194193
- SA1006
195194
- S1009
196-
- unlambda
197-
- dupCase
198-
- dupArg
199195
- offBy1
200196
- unslice
201197
# Integer overflow conversion

client/.golangci.yml

-4
Original file line numberDiff line numberDiff line change
@@ -167,7 +167,6 @@ issues:
167167
- ifElseChain
168168
- elseif
169169
- commentFormatting
170-
- var-naming
171170
- exitAfterDefer
172171
- captLocal
173172
- singleCaseSwitch
@@ -178,9 +177,6 @@ issues:
178177
- SA9009
179178
- SA1006
180179
- S1009
181-
- unlambda
182-
- dupCase
183-
- dupArg
184180
- offBy1
185181
- unslice
186182
# Integer overflow conversion

internal/proxy/task_search.go

+3-3
Original file line numberDiff line numberDiff line change
@@ -355,7 +355,7 @@ func (t *searchTask) initAdvancedSearchRequest(ctx context.Context) error {
355355
// fetch search_growing from search param
356356
t.SearchRequest.SubReqs = make([]*internalpb.SubSearchRequest, len(t.request.GetSubReqs()))
357357
t.queryInfos = make([]*planpb.QueryInfo, len(t.request.GetSubReqs()))
358-
queryFieldIds := []int64{}
358+
queryFieldIDs := []int64{}
359359
for index, subReq := range t.request.GetSubReqs() {
360360
plan, queryInfo, offset, _, err := t.tryGeneratePlan(subReq.GetSearchParams(), subReq.GetDsl(), subReq.GetExprTemplateValues())
361361
if err != nil {
@@ -386,7 +386,7 @@ func (t *searchTask) initAdvancedSearchRequest(ctx context.Context) error {
386386
}
387387

388388
internalSubReq.FieldId = queryInfo.GetQueryFieldId()
389-
queryFieldIds = append(queryFieldIds, internalSubReq.FieldId)
389+
queryFieldIDs = append(queryFieldIDs, internalSubReq.FieldId)
390390
// set PartitionIDs for sub search
391391
if t.partitionKeyMode {
392392
// isolation has tighter constraint, check first
@@ -429,7 +429,7 @@ func (t *searchTask) initAdvancedSearchRequest(ctx context.Context) error {
429429
}
430430

431431
var err error
432-
if function.HasNonBM25Functions(t.schema.CollectionSchema.Functions, queryFieldIds) {
432+
if function.HasNonBM25Functions(t.schema.CollectionSchema.Functions, queryFieldIDs) {
433433
ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-AdvancedSearch-call-function-udf")
434434
defer sp.End()
435435
exec, err := function.NewFunctionExecutor(t.schema.CollectionSchema)

internal/proxy/task_upsert.go

+4-4
Original file line numberDiff line numberDiff line change
@@ -65,8 +65,8 @@ type upsertTask struct {
6565
partitionKeyMode bool
6666
partitionKeys *schemapb.FieldData
6767
// automatic generate pk as new pk when autoID == true
68-
// delete task need use the oldIds
69-
oldIds *schemapb.IDs
68+
// delete task need use the oldIDs
69+
oldIDs *schemapb.IDs
7070
schemaTimestamp uint64
7171
}
7272

@@ -208,7 +208,7 @@ func (it *upsertTask) insertPreExecute(ctx context.Context) error {
208208
// use the passed pk as new pk when autoID == false
209209
// automatic generate pk as new pk when autoID == true
210210
var err error
211-
it.result.IDs, it.oldIds, err = checkUpsertPrimaryFieldData(it.schema.CollectionSchema, it.upsertMsg.InsertMsg)
211+
it.result.IDs, it.oldIDs, err = checkUpsertPrimaryFieldData(it.schema.CollectionSchema, it.upsertMsg.InsertMsg)
212212
log := log.Ctx(ctx).With(zap.String("collectionName", it.upsertMsg.InsertMsg.CollectionName))
213213
if err != nil {
214214
log.Warn("check primary field data and hash primary key failed when upsert",
@@ -507,7 +507,7 @@ func (it *upsertTask) deleteExecute(ctx context.Context, msgPack *msgstream.MsgP
507507
it.result.Status = merr.Status(err)
508508
return err
509509
}
510-
it.upsertMsg.DeleteMsg.PrimaryKeys = it.oldIds
510+
it.upsertMsg.DeleteMsg.PrimaryKeys = it.oldIDs
511511
it.upsertMsg.DeleteMsg.HashValues = typeutil.HashPK2Channels(it.upsertMsg.DeleteMsg.PrimaryKeys, channelNames)
512512

513513
// repack delete msg by dmChannel

internal/proxy/task_upsert_streaming.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ func (ut *upsertTaskByStreamingService) packInsertMessage(ctx context.Context) (
9696
func (it *upsertTaskByStreamingService) packDeleteMessage(ctx context.Context) ([]message.MutableMessage, error) {
9797
tr := timerecord.NewTimeRecorder(fmt.Sprintf("proxy deleteExecute upsert %d", it.ID()))
9898
collID := it.upsertMsg.DeleteMsg.CollectionID
99-
it.upsertMsg.DeleteMsg.PrimaryKeys = it.oldIds
99+
it.upsertMsg.DeleteMsg.PrimaryKeys = it.oldIDs
100100
log := log.Ctx(ctx).With(
101101
zap.Int64("collectionID", collID))
102102
// hash primary keys to channels

internal/proxy/util.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -1709,12 +1709,12 @@ func checkUpsertPrimaryFieldData(schema *schemapb.CollectionSchema, insertMsg *m
17091709
if !primaryFieldSchema.GetAutoID() {
17101710
return ids, ids, nil
17111711
}
1712-
newIds, err := parsePrimaryFieldData2IDs(newPrimaryFieldData)
1712+
newIDs, err := parsePrimaryFieldData2IDs(newPrimaryFieldData)
17131713
if err != nil {
17141714
log.Warn("parse primary field data to IDs failed", zap.Error(err))
17151715
return nil, nil, err
17161716
}
1717-
return newIds, ids, nil
1717+
return newIDs, ids, nil
17181718
}
17191719

17201720
func getPartitionKeyFieldData(fieldSchema *schemapb.FieldSchema, insertMsg *msgstream.InsertMsg) (*schemapb.FieldData, error) {

internal/storage/serde.go

+5-5
Original file line numberDiff line numberDiff line change
@@ -779,7 +779,7 @@ var _ RecordWriter = (*multiFieldRecordWriter)(nil)
779779

780780
type multiFieldRecordWriter struct {
781781
fw *pqarrow.FileWriter
782-
fieldIds []FieldID
782+
fieldIDs []FieldID
783783
schema *arrow.Schema
784784

785785
numRows int
@@ -788,8 +788,8 @@ type multiFieldRecordWriter struct {
788788

789789
func (mfw *multiFieldRecordWriter) Write(r Record) error {
790790
mfw.numRows += r.Len()
791-
columns := make([]arrow.Array, len(mfw.fieldIds))
792-
for i, fieldId := range mfw.fieldIds {
791+
columns := make([]arrow.Array, len(mfw.fieldIDs))
792+
for i, fieldId := range mfw.fieldIDs {
793793
columns[i] = r.Column(fieldId)
794794
mfw.writtenUncompressed += uint64(CalculateArraySize(columns[i]))
795795
}
@@ -806,7 +806,7 @@ func (mfw *multiFieldRecordWriter) Close() error {
806806
return mfw.fw.Close()
807807
}
808808

809-
func newMultiFieldRecordWriter(fieldIds []FieldID, fields []arrow.Field, writer io.Writer) (*multiFieldRecordWriter, error) {
809+
func newMultiFieldRecordWriter(fieldIDs []FieldID, fields []arrow.Field, writer io.Writer) (*multiFieldRecordWriter, error) {
810810
schema := arrow.NewSchema(fields, nil)
811811
fw, err := pqarrow.NewFileWriter(schema, writer,
812812
parquet.NewWriterProperties(parquet.WithMaxRowGroupLength(math.MaxInt64)), // No additional grouping for now.
@@ -816,7 +816,7 @@ func newMultiFieldRecordWriter(fieldIds []FieldID, fields []arrow.Field, writer
816816
}
817817
return &multiFieldRecordWriter{
818818
fw: fw,
819-
fieldIds: fieldIds,
819+
fieldIDs: fieldIDs,
820820
schema: schema,
821821
}, nil
822822
}

internal/storage/serde_events.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -1079,7 +1079,7 @@ func (dsw *MultiFieldDeltalogStreamWriter) GetRecordWriter() (RecordWriter, erro
10791079
return dsw.rw, nil
10801080
}
10811081

1082-
fieldIds := []FieldID{common.RowIDField, common.TimeStampField} // Not used.
1082+
fieldIDs := []FieldID{common.RowIDField, common.TimeStampField} // Not used.
10831083
fields := []arrow.Field{
10841084
{
10851085
Name: "pk",
@@ -1093,7 +1093,7 @@ func (dsw *MultiFieldDeltalogStreamWriter) GetRecordWriter() (RecordWriter, erro
10931093
},
10941094
}
10951095

1096-
rw, err := newMultiFieldRecordWriter(fieldIds, fields, &dsw.buf)
1096+
rw, err := newMultiFieldRecordWriter(fieldIDs, fields, &dsw.buf)
10971097
if err != nil {
10981098
return nil, err
10991099
}

internal/util/streamrpc/streamer_test.go

+4-4
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ func (s *ResultCacheServerSuite) TestSend() {
6666
s.Equal(6, len(msg.GetIds().GetIntId().GetData()))
6767
}
6868

69-
func generateIntIds(num int) *schemapb.IDs {
69+
func generateIntIDs(num int) *schemapb.IDs {
7070
data := make([]int64, num)
7171
for i := 0; i < num; i++ {
7272
data[i] = int64(i)
@@ -77,7 +77,7 @@ func generateIntIds(num int) *schemapb.IDs {
7777
}
7878
}
7979

80-
func generateStrIds(num int) *schemapb.IDs {
80+
func generateStrIDs(num int) *schemapb.IDs {
8181
data := make([]string, num)
8282
for i := 0; i < num; i++ {
8383
data[i] = strconv.FormatInt(int64(i), 10)
@@ -98,7 +98,7 @@ func (s *ResultCacheServerSuite) TestSplit() {
9898
cacheSrv := NewResultCacheServer(srv, 1024, 1024)
9999

100100
err := cacheSrv.Send(&internalpb.RetrieveResults{
101-
Ids: generateIntIds(1024),
101+
Ids: generateIntIDs(1024),
102102
})
103103
s.NoError(err)
104104

@@ -130,7 +130,7 @@ func (s *ResultCacheServerSuite) TestSplit() {
130130
cacheSrv := NewResultCacheServer(srv, 1024, 1024)
131131

132132
err := cacheSrv.Send(&internalpb.RetrieveResults{
133-
Ids: generateStrIds(2048),
133+
Ids: generateStrIDs(2048),
134134
})
135135
s.NoError(err)
136136

pkg/mq/msgstream/mq_msgstream_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -56,12 +56,12 @@ func TestMain(m *testing.M) {
5656
paramtable.Init()
5757
Params = paramtable.Get()
5858
mockKafkaCluster, err := kafka.NewMockCluster(1)
59-
defer mockKafkaCluster.Close()
6059
if err != nil {
6160
// nolint
6261
fmt.Printf("Failed to create MockCluster: %s\n", err)
6362
os.Exit(1)
6463
}
64+
defer mockKafkaCluster.Close()
6565
broker := mockKafkaCluster.BootstrapServers()
6666
Params.Save("kafka.brokerList", broker)
6767
// Disable pursuit mode for unit test by default

pkg/mq/msgstream/mqwrapper/kafka/kafka_client_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -27,12 +27,12 @@ var Params = paramtable.Get()
2727
func TestMain(m *testing.M) {
2828
paramtable.Init()
2929
mockCluster, err := kafka.NewMockCluster(1)
30-
defer mockCluster.Close()
3130
if err != nil {
3231
// nolint
3332
fmt.Printf("Failed to create MockCluster: %s\n", err)
3433
os.Exit(1)
3534
}
35+
defer mockCluster.Close()
3636

3737
broker := mockCluster.BootstrapServers()
3838
Params.Save("kafka.brokerList", broker)

pkg/mq/msgstream/mqwrapper/nmq/nmq_server_test.go

+12-10
Original file line numberDiff line numberDiff line change
@@ -31,19 +31,21 @@ import (
3131
var natsServerAddress string
3232

3333
func TestMain(m *testing.M) {
34-
paramtable.Init()
34+
exitCode := func() int {
35+
paramtable.Init()
36+
storeDir, _ := os.MkdirTemp("", "milvus_mq_nmq")
37+
defer os.RemoveAll(storeDir)
3538

36-
storeDir, _ := os.MkdirTemp("", "milvus_mq_nmq")
37-
defer os.RemoveAll(storeDir)
39+
cfg := ParseServerOption(paramtable.Get())
40+
cfg.Opts.Port = server.RANDOM_PORT
41+
cfg.Opts.StoreDir = storeDir
42+
MustInitNatsMQ(cfg)
43+
defer CloseNatsMQ()
3844

39-
cfg := ParseServerOption(paramtable.Get())
40-
cfg.Opts.Port = server.RANDOM_PORT
41-
cfg.Opts.StoreDir = storeDir
42-
MustInitNatsMQ(cfg)
43-
defer CloseNatsMQ()
45+
natsServerAddress = Nmq.ClientURL()
46+
return m.Run()
47+
}()
4448

45-
natsServerAddress = Nmq.ClientURL()
46-
exitCode := m.Run()
4749
os.Exit(exitCode)
4850
}
4951

pkg/mq/msgstream/mqwrapper/rmq/rmq_client_test.go

+18-15
Original file line numberDiff line numberDiff line change
@@ -35,33 +35,36 @@ import (
3535
)
3636

3737
func TestMain(m *testing.M) {
38-
paramtable.Init()
39-
pt := paramtable.Get()
40-
pt.Save(pt.ServiceParam.MQCfg.EnablePursuitMode.Key, "false")
41-
42-
rand.Seed(time.Now().UnixNano())
43-
path := "/tmp/milvus/rdb_data"
44-
defer os.RemoveAll(path)
45-
paramtable.Get().Save("rocksmq.compressionTypes", "0,0,0,0,0")
46-
_ = server2.InitRocksMQ(path)
47-
exitCode := m.Run()
48-
defer server2.CloseRocksMQ()
38+
exitCode := func() int {
39+
paramtable.Init()
40+
pt := paramtable.Get()
41+
pt.Save(pt.ServiceParam.MQCfg.EnablePursuitMode.Key, "false")
42+
43+
rand.Seed(time.Now().UnixNano())
44+
path := "/tmp/milvus/rdb_data"
45+
defer os.RemoveAll(path)
46+
paramtable.Get().Save("rocksmq.compressionTypes", "0,0,0,0,0")
47+
_ = server2.InitRocksMQ(path)
48+
defer server2.CloseRocksMQ()
49+
return m.Run()
50+
}()
51+
4952
os.Exit(exitCode)
5053
}
5154

5255
func Test_NewRmqClient(t *testing.T) {
5356
client, err := createRmqClient()
54-
defer client.Close()
5557
assert.NoError(t, err)
5658
assert.NotNil(t, client)
59+
client.Close()
5760
}
5861

5962
func TestRmqClient_CreateProducer(t *testing.T) {
6063
opts := client3.Options{}
6164
client, err := NewClient(opts)
62-
defer client.Close()
6365
assert.NoError(t, err)
6466
assert.NotNil(t, client)
67+
defer client.Close()
6568

6669
topic := "TestRmqClient_CreateProducer"
6770
proOpts := common.ProducerOptions{Topic: topic}
@@ -143,9 +146,9 @@ func TestRmqClient_GetLatestMsg(t *testing.T) {
143146

144147
func TestRmqClient_Subscribe(t *testing.T) {
145148
client, err := createRmqClient()
146-
defer client.Close()
147149
assert.NoError(t, err)
148150
assert.NotNil(t, client)
151+
defer client.Close()
149152

150153
topic := "TestRmqClient_Subscribe"
151154
proOpts := common.ProducerOptions{Topic: topic}
@@ -178,9 +181,9 @@ func TestRmqClient_Subscribe(t *testing.T) {
178181

179182
consumerOpts.Topic = topic
180183
consumer, err = client.Subscribe(context.TODO(), consumerOpts)
181-
defer consumer.Close()
182184
assert.NoError(t, err)
183185
assert.NotNil(t, consumer)
186+
defer consumer.Close()
184187
assert.Equal(t, consumer.Subscription(), subName)
185188

186189
msg := &common.ProducerMessage{

pkg/util/funcutil/slice_test.go

+3-3
Original file line numberDiff line numberDiff line change
@@ -43,13 +43,13 @@ func Test_SliceContain(t *testing.T) {
4343
}
4444

4545
for _, test := range cases {
46-
switch test.item.(type) {
46+
switch v := test.item.(type) {
4747
case string:
48-
if got := SliceContain(test.s.([]string), test.item.(string)); got != test.want {
48+
if got := SliceContain(test.s.([]string), v); got != test.want {
4949
t.Errorf("SliceContain(%v, %v) = %v", test.s, test.item, test.want)
5050
}
5151
case int:
52-
if got := SliceContain(test.s.([]int), test.item.(int)); got != test.want {
52+
if got := SliceContain(test.s.([]int), v); got != test.want {
5353
t.Errorf("SliceContain(%v, %v) = %v", test.s, test.item, test.want)
5454
}
5555
}

pkg/util/interceptor/server_id_interceptor_test.go

+2-6
Original file line numberDiff line numberDiff line change
@@ -78,9 +78,7 @@ func TestServerIDInterceptor(t *testing.T) {
7878
return nil, nil
7979
}
8080
serverInfo := &grpc.UnaryServerInfo{FullMethod: method}
81-
interceptor := ServerIDValidationUnaryServerInterceptor(func() int64 {
82-
return paramtable.GetNodeID()
83-
})
81+
interceptor := ServerIDValidationUnaryServerInterceptor(paramtable.GetNodeID)
8482

8583
// no md in context
8684
_, err := interceptor(context.Background(), req, serverInfo, handler)
@@ -114,9 +112,7 @@ func TestServerIDInterceptor(t *testing.T) {
114112
handler := func(srv interface{}, stream grpc.ServerStream) error {
115113
return nil
116114
}
117-
interceptor := ServerIDValidationStreamServerInterceptor(func() int64 {
118-
return paramtable.GetNodeID()
119-
})
115+
interceptor := ServerIDValidationStreamServerInterceptor(paramtable.GetNodeID)
120116

121117
// no md in context
122118
err := interceptor(nil, newMockSS(context.Background()), nil, handler)

pkg/util/merr/utils.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -154,7 +154,7 @@ func oldCode(code int32) commonpb.ErrorCode {
154154
case ErrNodeNotMatch.code():
155155
return commonpb.ErrorCode_NodeIDNotMatch
156156

157-
case ErrCollectionNotFound.code(), ErrPartitionNotFound.code(), ErrReplicaNotFound.code():
157+
case ErrPartitionNotFound.code(), ErrReplicaNotFound.code():
158158
return commonpb.ErrorCode_MetaFailed
159159

160160
case ErrReplicaNotAvailable.code(), ErrChannelNotAvailable.code(), ErrNodeNotAvailable.code():

0 commit comments

Comments
 (0)