avoids adding kube-router specific rules to enforce network policies in
built-in chains used in filter table

- adds one level of indirection
  INPUT   -> KUBE-ROUTER-INPUT
  FORWARD -> KUBE-ROUTER-FORWARD
  OUTPUT  -> KUBE-ROUTER-OUTPUT

- easy to troubleshoot with namesake custom chains

Fixes #185
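
The resulting filter table layout (a sketch assembled from the jump rules
this commit installs, not captured iptables output):

  -N KUBE-ROUTER-INPUT
  -N KUBE-ROUTER-FORWARD
  -N KUBE-ROUTER-OUTPUT
  -A INPUT   -m comment --comment "kube-router netpol" -j KUBE-ROUTER-INPUT
  -A FORWARD -m comment --comment "kube-router netpol" -j KUBE-ROUTER-FORWARD
  -A OUTPUT  -m comment --comment "kube-router netpol" -j KUBE-ROUTER-OUTPUT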
murali-reddy committed Jun 5, 2020
1 parent 8f5c959 commit e5a9900
Showing 1 changed file with 80 additions and 14 deletions.
pkg/controllers/netpol/network_policy_controller.go
@@ -36,6 +36,9 @@ const (
kubeNetworkPolicyChainPrefix = "KUBE-NWPLCY-"
kubeSourceIpSetPrefix = "KUBE-SRC-"
kubeDestinationIpSetPrefix = "KUBE-DST-"
kubeInputChainName = "KUBE-ROUTER-INPUT"
kubeForwardChainName = "KUBE-ROUTER-FORWARD"
kubeOutputChainName = "KUBE-ROUTER-OUTPUT"
)

// Network policy controller provides both ingress and egress filtering for the pods as per the defined network
@@ -141,6 +144,9 @@ func (npc *NetworkPolicyController) Run(healthChan chan<- *healthcheck.Controlle
glog.Info("Starting network policy controller")
npc.healthChan = healthChan

// set up kube-router specific top-level custom chains
npc.ensureTopLevelChains()

// Full syncs of the network policy controller take a lot of time and can only be processed one at a time,
// therefore, we start it in its own goroutine and request a sync through a single item channel
glog.Info("Starting network policy controller full sync goroutine")
@@ -179,6 +185,62 @@ func (npc *NetworkPolicyController) Run(healthChan chan<- *healthcheck.Controlle
}
}

// ensureTopLevelChains creates the custom chains KUBE-ROUTER-INPUT, KUBE-ROUTER-FORWARD
// and KUBE-ROUTER-OUTPUT, and the following rules in the filter table to jump from the
// builtin chains to the custom chains:
// -A INPUT -m comment --comment "kube-router netpol" -j KUBE-ROUTER-INPUT
// -A FORWARD -m comment --comment "kube-router netpol" -j KUBE-ROUTER-FORWARD
// -A OUTPUT -m comment --comment "kube-router netpol" -j KUBE-ROUTER-OUTPUT
func (npc *NetworkPolicyController) ensureTopLevelChains() {

iptablesCmdHandler, err := iptables.New()
if err != nil {
glog.Fatalf("Failed to initialize iptables executor due to %s", err.Error())
}

chains := map[string]string{"INPUT": kubeInputChainName, "FORWARD": kubeForwardChainName, "OUTPUT": kubeOutputChainName}

for builtinChain, customChain := range chains {
err = iptablesCmdHandler.NewChain("filter", customChain)
if err != nil && err.(*iptables.Error).ExitStatus() != 1 {
glog.Fatalf("Failed to run iptables command to create %s chain due to %s", customChain, err.Error())
}
args := []string{"-m", "comment", "--comment", "kube-router netpol", "-j", customChain}
exists, err := iptablesCmdHandler.Exists("filter", builtinChain, args...)
if err != nil {
glog.Fatalf("Failed to verify rule exists to jump to chain %s in %s chain due to %s", customChain, builtinChain, err.Error())
}
if !exists {
err := iptablesCmdHandler.Insert("filter", builtinChain, 1, args...)
if err != nil {
glog.Fatalf("Failed to run iptables command to insert in %s chain %s", builtinChain, err.Error())
}
} else {
rules, err := iptablesCmdHandler.List("filter", builtinChain)
if err != nil {
glog.Fatalf("failed to list rules in filter table %s chain due to %s", builtinChain, err.Error())
}

var ruleNo int
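// scan the chain's rules for the jump to the custom chain; List output
// has the chain policy line at index 0, so list index i maps directly to
// iptables rule number i and the first real rule sits at index 1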
for i, rule := range rules {
if strings.Contains(rule, customChain) {
ruleNo = i
break
}
}
if ruleNo != 1 {
err = iptablesCmdHandler.Insert("filter", builtinChain, 1, args...)
if err != nil {
glog.Fatalf("Failed to run iptables command to insert in %s chain %s", builtinChain, err.Error())
}
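// the insert above pushed the stale jump rule down one position,
// so it is now at rule number ruleNo+1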
err = iptablesCmdHandler.Delete("filter", builtinChain, strconv.Itoa(ruleNo+1))
if err != nil {
glog.Fatalf("Failed to delete wrong rule to jump to chain %s in %s chain due to %s", customChain, builtinChain, err.Error())
}
}
}
}
}
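
// A minimal sketch (not part of this commit, and the helper name is
// hypothetical) of how the position-1 guarantee could be verified by hand
// with the same go-iptables calls used above.
func verifyJumpRulePosition() {
	ipt, err := iptables.New()
	if err != nil {
		glog.Fatalf("Failed to initialize iptables executor due to %s", err.Error())
	}
	rules, err := ipt.List("filter", "FORWARD")
	if err != nil {
		glog.Fatalf("Failed to list rules in filter table FORWARD chain due to %s", err.Error())
	}
	// rules[0] is the chain policy line, so the kube-router jump rule
	// must be the entry at index 1 to be evaluated before anything else
	if len(rules) < 2 || !strings.Contains(rules[1], kubeForwardChainName) {
		glog.Errorf("kube-router jump rule is not the first rule in FORWARD chain")
	}
}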

// OnPodUpdate handles updates to pods from the Kubernetes api server
func (npc *NetworkPolicyController) OnPodUpdate(obj interface{}) {
pod := obj.(*api.Pod)
@@ -225,6 +287,10 @@ func (npc *NetworkPolicyController) fullPolicySync() {
}()

glog.V(1).Infof("Starting sync of iptables with version: %s", syncVersion)

// ensure kube-router specific top level chains and corresponding rules exist
npc.ensureTopLevelChains()

if npc.v1NetworkPolicy {
networkPoliciesInfo, err = npc.buildNetworkPoliciesInfo()
if err != nil {
@@ -760,25 +826,25 @@ func (npc *NetworkPolicyController) syncPodFirewallChains(networkPoliciesInfo []
comment = "rule to jump traffic destined to POD name:" + pod.name + " namespace: " + pod.namespace +
" to chain " + podFwChainName
args = []string{"-m", "comment", "--comment", comment, "-d", pod.ip, "-j", podFwChainName}
exists, err = iptablesCmdHandler.Exists("filter", "FORWARD", args...)
exists, err = iptablesCmdHandler.Exists("filter", kubeForwardChainName, args...)
if err != nil {
return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
}
if !exists {
err := iptablesCmdHandler.Insert("filter", "FORWARD", 1, args...)
err := iptablesCmdHandler.Insert("filter", kubeForwardChainName, 1, args...)
if err != nil {
return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
}
}

// ensure there is a rule in the filter table KUBE-ROUTER-OUTPUT chain to jump to pod specific firewall chain
// this rule applies to the traffic from a pod getting routed back to another pod on the same node by the service proxy
exists, err = iptablesCmdHandler.Exists("filter", "OUTPUT", args...)
exists, err = iptablesCmdHandler.Exists("filter", kubeOutputChainName, args...)
if err != nil {
return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
}
if !exists {
err := iptablesCmdHandler.Insert("filter", "OUTPUT", 1, args...)
err := iptablesCmdHandler.Insert("filter", kubeOutputChainName, 1, args...)
if err != nil {
return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
}
@@ -792,12 +858,12 @@ func (npc *NetworkPolicyController) syncPodFirewallChains(networkPoliciesInfo []
"-m", "comment", "--comment", comment,
"-d", pod.ip,
"-j", podFwChainName}
exists, err = iptablesCmdHandler.Exists("filter", "FORWARD", args...)
exists, err = iptablesCmdHandler.Exists("filter", kubeForwardChainName, args...)
if err != nil {
return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
}
if !exists {
err = iptablesCmdHandler.Insert("filter", "FORWARD", 1, args...)
err = iptablesCmdHandler.Insert("filter", kubeForwardChainName, 1, args...)
if err != nil {
return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
}
@@ -874,7 +940,7 @@ func (npc *NetworkPolicyController) syncPodFirewallChains(networkPoliciesInfo []
}
}

egressFilterChains := []string{"FORWARD", "OUTPUT", "INPUT"}
egressFilterChains := []string{kubeInputChainName, kubeForwardChainName, kubeOutputChainName}
for _, chain := range egressFilterChains {
// ensure there is a rule in each of the kube-router top-level filter table chains to jump to pod specific firewall chain
// this rule applies to the traffic getting forwarded/routed (traffic from the pod destined
@@ -902,12 +968,12 @@ func (npc *NetworkPolicyController) syncPodFirewallChains(networkPoliciesInfo []
"-m", "comment", "--comment", comment,
"-s", pod.ip,
"-j", podFwChainName}
exists, err = iptablesCmdHandler.Exists("filter", "FORWARD", args...)
exists, err = iptablesCmdHandler.Exists("filter", kubeForwardChainName, args...)
if err != nil {
return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
}
if !exists {
err = iptablesCmdHandler.Insert("filter", "FORWARD", 1, args...)
err = iptablesCmdHandler.Insert("filter", kubeForwardChainName, 1, args...)
if err != nil {
return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
}
@@ -982,7 +1048,7 @@ func cleanupStaleRules(activePolicyChains, activePodFwChains, activePolicyIPSets
// remove stale iptables podFwChain references from the filter table chains
for _, podFwChain := range cleanupPodFwChains {

primaryChains := []string{"FORWARD", "OUTPUT", "INPUT"}
primaryChains := []string{kubeInputChainName, kubeForwardChainName, kubeOutputChainName}
for _, egressChain := range primaryChains {
forwardChainRules, err := iptablesCmdHandler.List("filter", egressChain)
if err != nil {
@@ -1536,7 +1602,7 @@ func (npc *NetworkPolicyController) Cleanup() {
}

// delete jump rules in KUBE-ROUTER-FORWARD chain to pod specific firewall chain
forwardChainRules, err := iptablesCmdHandler.List("filter", "FORWARD")
forwardChainRules, err := iptablesCmdHandler.List("filter", kubeForwardChainName)
if err != nil {
glog.Errorf("Failed to delete iptables rules as part of cleanup")
return
@@ -1546,7 +1612,7 @@
var realRuleNo int
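// rules are deleted by number while iterating, and each deletion shifts
// the remaining rules up by one, so offset the index by the count removed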
for i, rule := range forwardChainRules {
if strings.Contains(rule, kubePodFirewallChainPrefix) {
err = iptablesCmdHandler.Delete("filter", "FORWARD", strconv.Itoa(i-realRuleNo))
err = iptablesCmdHandler.Delete("filter", kubeForwardChainName, strconv.Itoa(i-realRuleNo))
if err != nil {
glog.Errorf("Failed to delete iptables rule as part of cleanup: %s", err)
}
@@ -1555,7 +1621,7 @@
}

// delete jump rules in KUBE-ROUTER-OUTPUT chain to pod specific firewall chain
forwardChainRules, err = iptablesCmdHandler.List("filter", "OUTPUT")
forwardChainRules, err = iptablesCmdHandler.List("filter", kubeOutputChainName)
if err != nil {
glog.Errorf("Failed to delete iptables rules as part of cleanup")
return
@@ -1565,7 +1631,7 @@
realRuleNo = 0
for i, rule := range forwardChainRules {
if strings.Contains(rule, kubePodFirewallChainPrefix) {
err = iptablesCmdHandler.Delete("filter", "OUTPUT", strconv.Itoa(i-realRuleNo))
err = iptablesCmdHandler.Delete("filter", kubeOutputChainName, strconv.Itoa(i-realRuleNo))
if err != nil {
glog.Errorf("Failed to delete iptables rule as part of cleanup: %s", err)
}
