Merge branch 'master' into no-overwrite-tar
tstromberg authored Mar 11, 2020
2 parents 701cf5c + d63d3bc commit e45b230
Showing 33 changed files with 568 additions and 337 deletions.
88 changes: 54 additions & 34 deletions cmd/minikube/cmd/delete.go
@@ -88,6 +88,24 @@ func init() {
RootCmd.AddCommand(deleteCmd)
}

func deleteContainersAndVolumes() {
delLabel := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
if len(errs) > 0 { // it will error if there is no container to delete
glog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, errs)
}

errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
}

errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
}
}

// runDelete handles the execution flow of "minikube delete"
func runDelete(cmd *cobra.Command, args []string) {
if len(args) > 0 {
@@ -110,23 +128,9 @@ func runDelete(cmd *cobra.Command, args []string) {
}

if deleteAll {
delLabel := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
if len(errs) > 0 { // it will error if there is no container to delete
glog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, err)
}
deleteContainersAndVolumes()

errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
}

errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
}

errs = DeleteProfiles(profilesToDelete)
errs := DeleteProfiles(profilesToDelete)
if len(errs) > 0 {
HandleDeletionErrors(errs)
} else {
@@ -185,13 +189,11 @@ func DeleteProfiles(profiles []*config.Profile) []error {
return errs
}

func deleteProfile(profile *config.Profile) error {
viper.Set(config.ProfileName, profile.Name)

delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, profile.Name)
func deleteProfileContainersAndVolumes(name string) {
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, name)
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
if errs != nil { // it will error if there is no container to delete
glog.Infof("error deleting containers for %s (might be okay):\n%v", profile.Name, errs)
glog.Infof("error deleting containers for %s (might be okay):\n%v", name, errs)
}
errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
if errs != nil { // it will not error if there is nothing to delete
@@ -202,6 +204,13 @@ func deleteProfile(profile *config.Profile) error {
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error pruning volume (might be okay):\n%v", errs)
}
}

func deleteProfile(profile *config.Profile) error {
viper.Set(config.ProfileName, profile.Name)

deleteProfileContainersAndVolumes(profile.Name)

api, err := machine.NewAPIClient()
if err != nil {
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("error getting client %v", err))
@@ -230,37 +239,48 @@ func deleteProfile(profile *config.Profile) error {
out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err})
}

deleteHosts(api, cc)

// In case DeleteHost didn't complete the job.
deleteProfileDirectory(profile.Name)

if err := deleteConfig(profile.Name); err != nil {
return err
}

if err := deleteContext(profile.Name); err != nil {
return err
}
out.T(out.Deleted, `Removed all traces of the "{{.name}}" cluster.`, out.V{"name": profile.Name})
return nil
}

func deleteHosts(api libmachine.API, cc *config.ClusterConfig) {
if cc != nil {
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n)
if err = machine.DeleteHost(api, machineName); err != nil {
if err := machine.DeleteHost(api, machineName); err != nil {
switch errors.Cause(err).(type) {
case mcnerror.ErrHostDoesNotExist:
glog.Infof("Host %s does not exist. Proceeding ahead with cleanup.", machineName)
default:
out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name})
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": machineName})
}
}
}
}
}

// In case DeleteHost didn't complete the job.
deleteProfileDirectory(profile.Name)

if err := config.DeleteProfile(profile.Name); err != nil {
func deleteConfig(profileName string) error {
if err := config.DeleteProfile(profileName); err != nil {
if config.IsNotExist(err) {
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("\"%s\" profile does not exist", profile.Name))
delErr := profileDeletionErr(profileName, fmt.Sprintf("\"%s\" profile does not exist", profileName))
return DeletionError{Err: delErr, Errtype: MissingProfile}
}
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("failed to remove profile %v", err))
delErr := profileDeletionErr(profileName, fmt.Sprintf("failed to remove profile %v", err))
return DeletionError{Err: delErr, Errtype: Fatal}
}

if err := deleteContext(profile.Name); err != nil {
return err
}
out.T(out.Deleted, `Removed all traces of the "{{.name}}" cluster.`, out.V{"name": profile.Name})
return nil
}

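The cleanup helpers above delegate to minikube's oci package, but the underlying idea — remove every container carrying a minikube-owned label, and treat "nothing matched" as benign — is easy to see in isolation. Below is a minimal standalone sketch that shells out to the Docker CLI instead; the label value is hypothetical, standing in for oci.CreatedByLabelKey:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// deleteByLabel force-removes every container carrying the given label.
// As in deleteContainersAndVolumes above, an empty match list is not an error.
func deleteByLabel(label string) error {
	out, err := exec.Command("docker", "ps", "-aq", "--filter", "label="+label).Output()
	if err != nil {
		return fmt.Errorf("listing containers for label %q: %v", label, err)
	}
	ids := strings.Fields(string(out))
	if len(ids) == 0 {
		return nil // nothing to delete (might be okay)
	}
	args := append([]string{"rm", "-f", "-v"}, ids...)
	return exec.Command("docker", args...).Run()
}

func main() {
	// Hypothetical label for illustration only.
	if err := deleteByLabel("created_by_minikube=true"); err != nil {
		fmt.Println("cleanup error (might be okay):", err)
	}
}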
110 changes: 78 additions & 32 deletions cmd/minikube/cmd/start.go
@@ -116,7 +116,7 @@ const (
minUsableMem = 1024 // Kubernetes will not start with less than 1GB
minRecommendedMem = 2000 // Warn at no lower than existing configurations
minimumCPUS = 2
minimumDiskSize = "2000mb"
minimumDiskSize = 2000
autoUpdate = "auto-update-drivers"
hostOnlyNicType = "host-only-nic-type"
natNicType = "nat-nic-type"
@@ -337,14 +337,7 @@ func runStart(cmd *cobra.Command, args []string) {
ssh.SetDefaultClient(ssh.External)
}

var existingAddons map[string]bool
if viper.GetBool(installAddons) {
existingAddons = map[string]bool{}
if existing != nil && existing.Addons != nil {
existingAddons = existing.Addons
}
}
kubeconfig, err := node.Start(mc, n, true, existingAddons)
kubeconfig, err := startNode(existing, mc, n)
if err != nil {
exit.WithError("Starting node", err)
}
@@ -389,6 +382,17 @@ func displayEnviron(env []string) {
}
}

func startNode(existing *config.ClusterConfig, mc config.ClusterConfig, n config.Node) (*kubeconfig.Settings, error) {
var existingAddons map[string]bool
if viper.GetBool(installAddons) {
existingAddons = map[string]bool{}
if existing != nil && existing.Addons != nil {
existingAddons = existing.Addons
}
}
return node.Start(mc, n, true, existingAddons)
}

func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error {
if kcs.KeepContext {
out.T(out.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kcs.ClusterName})
@@ -427,8 +431,11 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error {
glog.Infof("kubectl: %s, cluster: %s (minor skew: %d)", client, cluster, minorSkew)

if client.Major != cluster.Major || minorSkew > 1 {
out.WarningT("{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster",
out.Ln("")
out.T(out.Warning, "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.",
out.V{"path": path, "client_version": client, "cluster_version": cluster})
out.T(out.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version",
out.V{"path": path, "client_version": client})
}
return nil
}
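To make the skew condition concrete: the warning now fires when the major versions differ or the minor versions are more than one apart. A self-contained sketch of that check, with hypothetical version numbers (the real code compares semver values parsed from kubectl and the cluster):

package main

import "fmt"

func main() {
	// Hypothetical versions: kubectl v1.15 against a v1.17 cluster.
	clientMajor, clientMinor := 1, 15
	clusterMajor, clusterMinor := 1, 17

	minorSkew := clusterMinor - clientMinor
	if minorSkew < 0 {
		minorSkew = -minorSkew
	}
	// Same condition as the hunk above.
	if clientMajor != clusterMajor || minorSkew > 1 {
		fmt.Printf("kubectl v%d.%d may be incompatible with Kubernetes v%d.%d\n",
			clientMajor, clientMinor, clusterMajor, clusterMinor)
	}
}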
@@ -638,43 +645,62 @@ func validateUser(drvName string) {
}
}

// defaultMemorySize calculates the default memory footprint in MB
func defaultMemorySize(drvName string) int {
fallback := 2200
maximum := 6000

// memoryLimits returns the amount of memory allocated to the system and hypervisor
func memoryLimits(drvName string) (int, int, error) {
v, err := mem.VirtualMemory()
if err != nil {
return fallback
return -1, -1, err
}
available := v.Total / 1024 / 1024
sysLimit := int(v.Total / 1024 / 1024)
containerLimit := 0

// For KIC, do not allocate more memory than the container has available (+ some slack)
if driver.IsKIC(drvName) {
s, err := oci.DaemonInfo(drvName)
if err != nil {
return fallback
return -1, -1, err
}
containerLimit = int(s.TotalMemory / 1024 / 1024)
}
return sysLimit, containerLimit, nil
}

// suggestMemoryAllocation calculates the default memory footprint in MB
func suggestMemoryAllocation(sysLimit int, containerLimit int) int {
fallback := 2200
maximum := 6000

if sysLimit > 0 && fallback > sysLimit {
return sysLimit
}

// If there are container limits, add tiny bit of slack for non-minikube components
if containerLimit > 0 {
if fallback > containerLimit {
return containerLimit
}
maximum = int(s.TotalMemory/1024/1024) - 128
maximum = containerLimit - 48
}

suggested := int(available / 4)
// Suggest 25% of RAM, rounded to nearest 100MB. Hyper-V requires an even number!
suggested := int(float32(sysLimit)/400.0) * 100

if suggested > maximum {
suggested = maximum
return maximum
}

if suggested < fallback {
suggested = fallback
return fallback
}

glog.Infof("Selecting memory default of %dMB, given %dMB available and %dMB maximum", suggested, available, maximum)
return suggested
}
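The suggestion logic truncates 25% of system memory down to a 100MB boundary — int(float32(sysLimit)/400.0) * 100 — then clamps the result between the 2200MB fallback and the 6000MB (or container-derived) maximum. A few concrete calls, matching the test table later in this commit:

suggestMemoryAllocation(16384, 0)     // 25% = 4096 → truncated to 4000
suggestMemoryAllocation(14567, 0)     // 25% ≈ 3641 → truncated to 3600
suggestMemoryAllocation(2048, 0)      // fallback 2200 exceeds sysLimit → 2048
suggestMemoryAllocation(64000, 16384) // 25% = 16000, under the 16384-48 cap → 16000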

// validateMemorySize validates the memory size matches the minimum recommended
func validateMemorySize() {
req := pkgutil.CalculateSizeInMB(viper.GetString(memory))
req, err := pkgutil.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
exit.WithCodeT(exit.Config, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
}
if req < minUsableMem && !viper.GetBool(force) {
exit.WithCodeT(exit.Config, "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB",
out.V{"requested": req, "mininum": minUsableMem})
@@ -707,9 +733,13 @@ func validateCPUCount(local bool) {
// validateFlags validates the supplied flags against known bad combinations
func validateFlags(cmd *cobra.Command, drvName string) {
if cmd.Flags().Changed(humanReadableDiskSize) {
diskSizeMB := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
if diskSizeMB < pkgutil.CalculateSizeInMB(minimumDiskSize) && !viper.GetBool(force) {
exit.WithCodeT(exit.Config, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": pkgutil.CalculateSizeInMB(minimumDiskSize)})
diskSizeMB, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
if err != nil {
exit.WithCodeT(exit.Config, "Validation unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
}

if diskSizeMB < minimumDiskSize && !viper.GetBool(force) {
exit.WithCodeT(exit.Config, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": minimumDiskSize})
}
}

@@ -817,9 +847,20 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
kubeNodeName = "m01"
}

mem := defaultMemorySize(drvName)
if viper.GetString(memory) != "" {
mem = pkgutil.CalculateSizeInMB(viper.GetString(memory))
sysLimit, containerLimit, err := memoryLimits(drvName)
if err != nil {
glog.Warningf("Unable to query memory limits: %v", err)
}

mem := suggestMemoryAllocation(sysLimit, containerLimit)
if cmd.Flags().Changed(memory) {
mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
exit.WithCodeT(exit.Config, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
}

} else {
glog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit)
}

// Create the initial node, which will necessarily be a control plane
Expand All @@ -831,14 +872,19 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
Worker: true,
}

diskSize, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
if err != nil {
exit.WithCodeT(exit.Config, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
}

cfg := config.ClusterConfig{
Name: viper.GetString(config.ProfileName),
KeepContext: viper.GetBool(keepContext),
EmbedCerts: viper.GetBool(embedCerts),
MinikubeISO: viper.GetString(isoURL),
Memory: mem,
CPUs: viper.GetInt(cpus),
DiskSize: pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize)),
DiskSize: diskSize,
Driver: drvName,
HyperkitVpnKitSock: viper.GetString(vpnkitSock),
HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),
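A recurring change in this file is that pkgutil.CalculateSizeInMB now returns (int, error) instead of silently swallowing parse failures, so each call site exits with exit.Config on bad input. A minimal sketch of such a parser — not minikube's implementation, and supporting only plain numbers plus "mb"/"gb" suffixes — looks like this:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// calculateSizeInMB converts a human-readable size such as "2000mb" or
// "20gb" into megabytes, reporting unparseable input as an error.
func calculateSizeInMB(humanReadableSize string) (int, error) {
	s := strings.ToLower(strings.TrimSpace(humanReadableSize))
	multiplier := 1
	switch {
	case strings.HasSuffix(s, "gb"):
		multiplier, s = 1024, strings.TrimSuffix(s, "gb")
	case strings.HasSuffix(s, "mb"):
		s = strings.TrimSuffix(s, "mb")
	}
	n, err := strconv.Atoi(s)
	if err != nil {
		return 0, fmt.Errorf("invalid size %q: %v", humanReadableSize, err)
	}
	return n * multiplier, nil
}

func main() {
	for _, in := range []string{"2000mb", "20gb", "bogus"} {
		mb, err := calculateSizeInMB(in)
		fmt.Println(in, "->", mb, err)
	}
}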
34 changes: 33 additions & 1 deletion cmd/minikube/cmd/start_test.go
@@ -71,8 +71,9 @@ func TestGetKuberneterVersion(t *testing.T) {
}

func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
viper.SetDefault(memory, defaultMemorySize)
// Set default disk size value in lieu of flag init
viper.SetDefault(humanReadableDiskSize, defaultDiskSize)

originalEnv := os.Getenv("HTTP_PROXY")
defer func() {
err := os.Setenv("HTTP_PROXY", originalEnv)
@@ -124,3 +125,34 @@
})
}
}

func TestSuggestMemoryAllocation(t *testing.T) {
var tests = []struct {
description string
sysLimit int
containerLimit int
want int
}{
{"128GB sys", 128000, 0, 6000},
{"64GB sys", 64000, 0, 6000},
{"16GB sys", 16384, 0, 4000},
{"odd sys", 14567, 0, 3600},
{"4GB sys", 4096, 0, 2200},
{"2GB sys", 2048, 0, 2048},
{"Unable to poll sys", 0, 0, 2200},
{"128GB sys, 16GB container", 128000, 16384, 16336},
{"64GB sys, 16GB container", 64000, 16384, 16000},
{"16GB sys, 4GB container", 16384, 4096, 4000},
{"4GB sys, 3.5GB container", 16384, 3500, 3452},
{"2GB sys, 2GB container", 16384, 2048, 2048},
{"2GB sys, unable to poll container", 16384, 0, 4000},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
got := suggestMemoryAllocation(test.sysLimit, test.containerLimit)
if got != test.want {
t.Errorf("suggestMemoryAllocation(sys=%d, container=%d) = %d, want: %d", test.sysLimit, test.containerLimit, got, test.want)
}
})
}
}
2 changes: 1 addition & 1 deletion hack/jenkins/common.sh
@@ -353,7 +353,7 @@ touch "${HTML_OUT}"
gopogh_status=$(gopogh -in "${JSON_OUT}" -out "${HTML_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}") || true
fail_num=$(echo $gopogh_status | jq '.NumberOfFail')
test_num=$(echo $gopogh_status | jq '.NumberOfTests')
pessimistic_status="$completed with ${fail_num} / ${test_num} failures in ${elapsed}"
pessimistic_status="${fail_num} / ${test_num} failures"
description="completed with ${status} in ${elapsed} minute(s)."
if [ "$status" = "failure" ]; then
description="completed with ${pessimistic_status} in ${elapsed} minute(s)."
