package main

import (
    "context"
    "net/url"
    "os"
    "os/signal"
    "sync"
    "syscall"

    "github.com/braintree/manners"
    "github.com/kelseyhightower/envconfig"
    log "github.com/sirupsen/logrus"
    "github.com/weihesdlegend/Vacation-planner/iowrappers"
    "github.com/weihesdlegend/Vacation-planner/planner"
    "github.com/weihesdlegend/Vacation-planner/utils"
    "gopkg.in/yaml.v3"
)
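
// numWorkers is the number of goroutines processing planning events.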
const numWorkers = 5
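
// Config holds server settings populated from environment variables via envconfig.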
type Config struct {
    Server struct {
        Domain     string `envconfig:"DOMAIN"`
        ServerPort string `envconfig:"PORT" default:"10000"`
    }
    Redis struct {
        RedisUrl        string `envconfig:"REDIS_URL" default:"redis://localhost:6379"`
        RedisStreamName string `default:"stream:planning_api_usage"`
    }
    MapsClientApiKey        string `default:"YOUR_GOOGLE_API_KEY" split_words:"true"`
    GoogleOAuthClientID     string `envconfig:"GOOGLE_OAUTH_CLIENT_ID"`
    GoogleOAuthClientSecret string `envconfig:"GOOGLE_OAUTH_CLIENT_SECRET"`
    GeonamesApiKey          string `envconfig:"GEONAMES_API_KEY"`
    BlobBucketId            string `envconfig:"BLOB_BUCKET_ID"`
}
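
// Configurations holds settings decoded from the YAML config file (config/config.yml).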
type Configurations struct {
    Server struct {
        GoogleMaps struct {
            DetailedSearchFields []string `yaml:"detailed_search_fields"`
        } `yaml:"google_maps"`
        PlanSolver struct {
            SamePlaceDedupeCountLimit int  `yaml:"same_place_dedupe_count_limit"`
            NearbyCitiesCountLimit    int  `yaml:"nearby_cities_count_limit"`
            EnableMapsPhotoClient     bool `yaml:"enable_maps_photo_client"`
        } `yaml:"plan_solver"`
    } `yaml:"server"`
}

// init sets up the application logger; a failure to create it is fatal.
func init() {
    utils.LogErrorWithLevel(iowrappers.CreateLogger(), utils.LogFatal)
}

// flattenConfig flattens the configs as a key-value map
func flattenConfig(configs *Configurations) map[string]interface{} {
    flattenedConfigs := make(map[string]interface{})
    flattenedConfigs["server:google_maps:detailed_search_fields"] = configs.Server.GoogleMaps.DetailedSearchFields
    flattenedConfigs["server:plan_solver:same_place_dedupe_count_limit"] = configs.Server.PlanSolver.SamePlaceDedupeCountLimit
    flattenedConfigs["server:plan_solver:nearby_cities_count_limit"] = configs.Server.PlanSolver.NearbyCitiesCountLimit
    flattenedConfigs["server:plan_solver:enable_maps_photo_client"] = configs.Server.PlanSolver.EnableMapsPhotoClient
    return flattenedConfigs
}
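
// RunServer loads environment and YAML configurations, initializes the planner,
// and runs the HTTP server until it is shut down.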
func RunServer() {
    conf := Config{}
    err := envconfig.Process("", &conf)
    if err != nil {
        log.Fatal(err)
    }

    redisURL, err := url.Parse(conf.Redis.RedisUrl)
    if err != nil {
        log.Fatal(err)
    }

    configFile, configFileReadErr := os.Open("config/config.yml")
    if configFileReadErr != nil {
        log.Fatalf("configs read failure: %v", configFileReadErr)
    }

    configs := &Configurations{}
    configFileDecoder := yaml.NewDecoder(configFile)
    if configFileDecodeErr := configFileDecoder.Decode(configs); configFileDecodeErr != nil {
        log.Fatal(configFileDecodeErr)
    }

    myPlanner := planner.MyPlanner{}
    myPlanner.Init(conf.MapsClientApiKey, redisURL, conf.Redis.RedisStreamName,
        flattenConfig(configs), conf.GoogleOAuthClientID, conf.GoogleOAuthClientSecret,
        conf.Server.Domain, conf.GeonamesApiKey, conf.BlobBucketId)

    svr := myPlanner.SetupRouter(conf.Server.ServerPort)

    // trap SIGINT/SIGTERM so the server can shut down gracefully
    c := make(chan os.Signal, 1)
    signal.Notify(c, os.Interrupt, syscall.SIGTERM)

    graceSvr := manners.NewWithServer(svr)
    go listenForShutDownServer(c, graceSvr, &myPlanner)

    err = graceSvr.ListenAndServe()
    if err != nil {
        log.Fatal(err)
    }
    log.Info("Server gracefully shut down.")
}

func main() {
    RunServer()
}
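
// listenForShutDownServer starts the planning-event workers and, once a
// shutdown signal arrives, drains them and closes the server gracefully.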
func listenForShutDownServer(ch <-chan os.Signal, svr *manners.GracefulServer, myPlanner *planner.MyPlanner) {
    // destroy zap logger
    defer myPlanner.Destroy()

    wg := &sync.WaitGroup{}
    wg.Add(numWorkers)

    // dispatch workers
    for worker := 0; worker < numWorkers; worker++ {
        go myPlanner.ProcessPlanningEvent(worker, wg)
    }
    myPlanner.Dispatcher.Run(context.Background())

    go func() {
        // wait for shut-down signal
        <-ch
        // close worker channels
        close(myPlanner.PlanningEvents)
        wg.Wait()
        myPlanner.Dispatcher.Stop()
    }()

    myPlanner.Dispatcher.Wait()
    svr.Close()
}