@@ -162,11 +162,11 @@ type EtcdServer struct {
 	// count the number of inflight snapshots.
 	// MUST use atomic operation to access this field.
 	inflightSnapshots int64
+	Cfg *ServerConfig
 
 	readych chan struct{}
 	r       raftNode
 
-	cfg       *ServerConfig
 	snapCount uint64
 
 	w wait.Wait
@@ -369,7 +369,7 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
 
 	srv = &EtcdServer{
 		readych:   make(chan struct{}),
-		cfg:       cfg,
+		Cfg:       cfg,
 		snapCount: cfg.SnapCount,
 		errorc:    make(chan error, 1),
 		store:     st,
@@ -444,7 +444,7 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
 // It also starts a goroutine to publish its server information.
 func (s *EtcdServer) Start() {
 	s.start()
-	go s.publish(s.cfg.ReqTimeout())
+	go s.publish(s.Cfg.ReqTimeout())
 	go s.purgeFile()
 	go monitorFileDescriptor(s.done)
 	go s.monitorVersions()
@@ -473,11 +473,11 @@ func (s *EtcdServer) start() {
 
 func (s *EtcdServer) purgeFile() {
 	var serrc, werrc <-chan error
-	if s.cfg.MaxSnapFiles > 0 {
-		serrc = fileutil.PurgeFile(s.cfg.SnapDir(), "snap", s.cfg.MaxSnapFiles, purgeFileInterval, s.done)
+	if s.Cfg.MaxSnapFiles > 0 {
+		serrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
 	}
-	if s.cfg.MaxWALFiles > 0 {
-		werrc = fileutil.PurgeFile(s.cfg.WALDir(), "wal", s.cfg.MaxWALFiles, purgeFileInterval, s.done)
+	if s.Cfg.MaxWALFiles > 0 {
+		werrc = fileutil.PurgeFile(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done)
 	}
 	select {
 	case e := <-werrc:
@@ -623,7 +623,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
 		plog.Panicf("get database snapshot file path error: %v", err)
 	}
 
-	fn := path.Join(s.cfg.SnapDir(), databaseFilename)
+	fn := path.Join(s.Cfg.SnapDir(), databaseFilename)
 	if err := os.Rename(snapfn, fn); err != nil {
 		plog.Panicf("rename snapshot file error: %v", err)
 	}
@@ -764,7 +764,7 @@ func (s *EtcdServer) LeaderStats() []byte {
 func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() }
 
 func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) error {
-	if s.cfg.StrictReconfigCheck && !s.cluster.IsReadyToAddNewMember() {
+	if s.Cfg.StrictReconfigCheck && !s.cluster.IsReadyToAddNewMember() {
 		// If s.cfg.StrictReconfigCheck is false, it means the option --strict-reconfig-check isn't passed to etcd.
 		// In such a case adding a new member is allowed unconditionally
 		return ErrNotEnoughStartedMembers
@@ -784,7 +784,7 @@ func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) erro
 }
 
 func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error {
-	if s.cfg.StrictReconfigCheck && !s.cluster.IsReadyToRemoveMember(id) {
+	if s.Cfg.StrictReconfigCheck && !s.cluster.IsReadyToRemoveMember(id) {
 		// If s.cfg.StrictReconfigCheck is false, it means the option --strict-reconfig-check isn't passed to etcd.
 		// In such a case removing a member is allowed unconditionally
 		return ErrNotEnoughStartedMembers
@@ -823,7 +823,7 @@ func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) }
 
 func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) }
 
-func (s *EtcdServer) IsPprofEnabled() bool { return s.cfg.EnablePprof }
+func (s *EtcdServer) IsPprofEnabled() bool { return s.Cfg.EnablePprof }
 
 // configure sends a configuration change through consensus and
 // then waits for it to be applied to the server. It
@@ -939,7 +939,7 @@ func (s *EtcdServer) send(ms []raftpb.Message) {
 			ok, exceed := s.r.td.Observe(ms[i].To)
 			if !ok {
 				// TODO: limit request rate.
-				plog.Warningf("failed to send out heartbeat on time (exceeded the %dms timeout for %v)", s.cfg.TickMs, exceed)
+				plog.Warningf("failed to send out heartbeat on time (exceeded the %dms timeout for %v)", s.Cfg.TickMs, exceed)
 				plog.Warningf("server is likely overloaded")
 			}
 		}
@@ -1221,7 +1221,7 @@ func (s *EtcdServer) updateClusterVersion(ver string) {
 		Path: membership.StoreClusterVersionKey(),
 		Val:  ver,
 	}
-	ctx, cancel := context.WithTimeout(context.Background(), s.cfg.ReqTimeout())
+	ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
 	_, err := s.Do(ctx, req)
 	cancel()
 	switch err {
@@ -1241,7 +1241,7 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
 		return ErrCanceled
 	case context.DeadlineExceeded:
 		curLeadElected := s.r.leadElectedTime()
-		prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.cfg.ElectionTicks) * time.Duration(s.cfg.TickMs) * time.Millisecond)
+		prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
 		if start.After(prevLeadLost) && start.Before(curLeadElected) {
 			return ErrTimeoutDueToLeaderFail
 		}
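The net effect of the diff is a rename from the unexported field cfg to the exported Cfg, so code outside the etcdserver package can read the server's ServerConfig. A minimal sketch of what that enables, assuming the coreos-era import path; printLimits is a hypothetical helper, not part of this change, and it only uses names that appear in the diff (ReqTimeout, MaxWALFiles, MaxSnapFiles):

	package main

	import (
		"fmt"

		"github.com/coreos/etcd/etcdserver"
	)

	// printLimits is a hypothetical helper: with the field exported as Cfg,
	// callers outside the etcdserver package can inspect configuration that
	// was previously unreachable behind the unexported cfg field.
	func printLimits(s *etcdserver.EtcdServer) {
		fmt.Println("request timeout:", s.Cfg.ReqTimeout())
		fmt.Println("max WAL files:  ", s.Cfg.MaxWALFiles)
		fmt.Println("max snap files: ", s.Cfg.MaxSnapFiles)
	}

Note that all access inside the package changes mechanically (s.cfg.X becomes s.Cfg.X), while unchanged context lines, such as the comments mentioning s.cfg.StrictReconfigCheck, still carry the old spelling.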