Fix issues of multiple published ports mapping to the same target port
This fix tries to address the issue raised in moby/moby#29370
where a service with multiple published ports mapping to the same target
port (e.g., `--publish 5000:80 --publish 5001:80`) can't be allocated.
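
For instance, a command along the following lines (the service name and image are illustrative, not taken from the report) reproduces the failing scenario:

docker service create --name web --publish 5000:80 --publish 5001:80 nginx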

The reason for the issue is that `getPortConfigKey` is used for both
allocated ports and configured (not necessarily allocated) ports.
However, `getPortConfigKey` does not take the `PublishedPort` field into
consideration, even though that field can differ between allocated ports.

This fix saves a map of `portKey:portNum:portState` instead of the
currently used `portKey:portState`, so that multiple published ports can
be processed.
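
As an illustration of the keying change, here is a minimal, self-contained Go sketch. It uses simplified hypothetical types (`portKey`, `portState`) rather than the actual swarmkit `api.PortConfig`: with the old single-level key, two ports published to the same target port collapse into one entry, while an extra map level keyed on the published port keeps both.

// Sketch only: hypothetical simplified types, not the swarmkit API.
package main

import "fmt"

type portKey struct {
	protocol   string
	targetPort uint32
}

type portState struct {
	targetPort    uint32
	publishedPort uint32
}

func main() {
	ports := []portState{
		{targetPort: 80, publishedPort: 5000},
		{targetPort: 80, publishedPort: 5001},
	}

	// Old scheme: portKey -> state. Both ports share the same key
	// (protocol, target port), so the second entry overwrites the first.
	oldScheme := map[portKey]portState{}
	for _, p := range ports {
		oldScheme[portKey{protocol: "tcp", targetPort: p.targetPort}] = p
	}
	fmt.Println("old scheme entries:", len(oldScheme)) // 1

	// New scheme: portKey -> publishedPort -> state. Both allocations
	// remain visible.
	newScheme := map[portKey]map[uint32]portState{}
	for _, p := range ports {
		k := portKey{protocol: "tcp", targetPort: p.targetPort}
		if _, ok := newScheme[k]; !ok {
			newScheme[k] = map[uint32]portState{}
		}
		newScheme[k][p.publishedPort] = p
	}
	fmt.Println("new scheme entries:", len(newScheme[portKey{protocol: "tcp", targetPort: 80}])) // 2
}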

A test case has been added to the unit tests. The newly added test case
only passes with this PR applied.

Signed-off-by: Yong Tang <[email protected]>
yongtang committed Dec 30, 2016
1 parent 74b49ee commit c786e14
Showing 2 changed files with 153 additions and 33 deletions.
129 changes: 96 additions & 33 deletions manager/allocator/networkallocator/portallocator.go
@@ -38,6 +38,54 @@ type portSpace struct {
dynamicPortSpace *idm.Idm
}

type allocatedPorts map[api.PortConfig]map[uint32]*api.PortConfig

func (ps allocatedPorts) addState(p *api.PortConfig) {
portKey := getPortConfigKey(p)
if _, ok := ps[portKey]; !ok {
ps[portKey] = make(map[uint32]*api.PortConfig)
}
ps[portKey][p.PublishedPort] = p
}

func (ps allocatedPorts) delState(p *api.PortConfig) *api.PortConfig {
portKey := getPortConfigKey(p)

portStateMap, ok := ps[portKey]

// If name, port, protocol values don't match then we
// are not allocated.
if !ok {
return nil
}

if p.PublishedPort != 0 {
// If SwarmPort was user defined but the port state
// SwarmPort doesn't match we are not allocated.
v, ok := portStateMap[p.PublishedPort]

if !ok {
return nil
}
// Delete state from allocatedPorts
delete(ps[portKey], p.PublishedPort)

return v
}

// If PublishedPort == 0, return any allocated state with a
// non-zero PublishedPort; if there is none, we are not allocated.
for publishedPort, v := range portStateMap {
if publishedPort != 0 {
// Delete state from allocatedPorts
delete(ps[portKey], publishedPort)
return v
}
}

return nil
}

func newPortAllocator() (*portAllocator, error) {
portSpaces := make(map[api.PortConfig_Protocol]*portSpace)
for _, protocol := range []api.PortConfig_Protocol{api.ProtocolTCP, api.ProtocolUDP} {
@@ -73,7 +121,7 @@ func newPortSpace(protocol api.PortConfig_Protocol) (*portSpace, error) {
}, nil
}

// getPortConfigKey returns a map key for doing set operations with
// getPortConfigkey returns a map key for doing set operations with
// ports. The key consists of name, protocol and target port which
// uniquely identifies a port within a single Endpoint.
func getPortConfigKey(p *api.PortConfig) api.PortConfig {
@@ -91,40 +139,63 @@ func reconcilePortConfigs(s *api.Service) []*api.PortConfig {
return s.Spec.Endpoint.Ports
}

allocatedPorts := make(map[api.PortConfig]*api.PortConfig)
portStates := allocatedPorts{}
for _, portState := range s.Endpoint.Ports {
if portState.PublishMode != api.PublishModeIngress {
continue
}

allocatedPorts[getPortConfigKey(portState)] = portState
portStates.addState(portState)
}

var portConfigs []*api.PortConfig

// Process the portConfig with portConfig.PublishMode != api.PublishModeIngress
// Iterate portConfigs with PublishModeIngress
for _, portConfig := range s.Spec.Endpoint.Ports {
// If the PublishMode is not Ingress simply pick up
// the port config.
if portConfig.PublishMode != api.PublishModeIngress {
portConfigs = append(portConfigs, portConfig)
continue
}
}

// Iterate portConfigs with PublishedPort != 0 (high priority)
for _, portConfig := range s.Spec.Endpoint.Ports {
// Ignore ports which are not PublishModeIngress (already processed)
if portConfig.PublishMode != api.PublishModeIngress {
continue
}
if portConfig.PublishedPort != 0 {
// Remove record from portState
portStates.delState(portConfig)

portState, ok := allocatedPorts[getPortConfigKey(portConfig)]
// For PublishedPort != 0 prefer the portConfig
portConfigs = append(portConfigs, portConfig)
}
}

// Iterate portConfigs with PublishedPort == 0 (low priority)
for _, portConfig := range s.Spec.Endpoint.Ports {
// Ignore ports which are not PublishModeIngress (already processed)
if portConfig.PublishMode != api.PublishModeIngress {
continue
}

// If the portConfig is exactly the same as portState
// except if SwarmPort is not user-define then prefer
// portState to ensure sticky allocation of the same
// port that was allocated before.
if ok && portConfig.Name == portState.Name &&
portConfig.TargetPort == portState.TargetPort &&
portConfig.Protocol == portState.Protocol &&
portConfig.PublishedPort == 0 {
portConfigs = append(portConfigs, portState)
continue
}
if portConfig.PublishedPort == 0 {
// Remove record from portState
if portState := portStates.delState(portConfig); portState != nil {
portConfigs = append(portConfigs, portState)
continue
}

// For all other cases prefer the portConfig
portConfigs = append(portConfigs, portConfig)
// For all other cases prefer the portConfig
portConfigs = append(portConfigs, portConfig)
}
}

return portConfigs
@@ -213,40 +284,32 @@ func (pa *portAllocator) isPortsAllocated(s *api.Service) bool {
return false
}

allocatedPorts := make(map[api.PortConfig]*api.PortConfig)
portStates := allocatedPorts{}
for _, portState := range s.Endpoint.Ports {
if portState.PublishMode != api.PublishModeIngress {
continue
}

allocatedPorts[getPortConfigKey(portState)] = portState
portStates.addState(portState)
}

// Iterate portConfigs with PublishedPort != 0 (high priority)
for _, portConfig := range s.Spec.Endpoint.Ports {
// Ignore ports which are not PublishModeIngress
if portConfig.PublishMode != api.PublishModeIngress {
continue
}

portState, ok := allocatedPorts[getPortConfigKey(portConfig)]

// If name, port, protocol values don't match then we
// are not allocated.
if !ok {
if portConfig.PublishedPort != 0 && portStates.delState(portConfig) == nil {
return false
}
}

// If SwarmPort was user defined but the port state
// SwarmPort doesn't match we are not allocated.
if portConfig.PublishedPort != portState.PublishedPort &&
portConfig.PublishedPort != 0 {
return false
// Iterate portConfigs with PublishedPort == 0 (low priority)
for _, portConfig := range s.Spec.Endpoint.Ports {
// Ignore ports which are not PublishModeIngress
if portConfig.PublishMode != api.PublishModeIngress {
continue
}

// If SwarmPort was not defined by user and port state
// is not initialized with a valid SwarmPort value then
// we are not allocated.
if portConfig.PublishedPort == 0 && portState.PublishedPort == 0 {
if portConfig.PublishedPort == 0 && portStates.delState(portConfig) == nil {
return false
}
}
57 changes: 57 additions & 0 deletions manager/allocator/networkallocator/portallocator_test.go
@@ -550,6 +550,63 @@ func TestIsPortsAllocated(t *testing.T) {
},
expect: true,
},
{
// Endpoint and Spec.Endpoint have multiple PublishedPort
// See docker/docker#29730
input: &api.Service{
Spec: api.ServiceSpec{
Endpoint: &api.EndpointSpec{
Ports: []*api.PortConfig{
{
Protocol: api.ProtocolTCP,
TargetPort: 80,
PublishedPort: 5000,
},
{
Protocol: api.ProtocolTCP,
TargetPort: 80,
PublishedPort: 5001,
},
{
Protocol: api.ProtocolTCP,
TargetPort: 80,
PublishedPort: 0,
},
{
Protocol: api.ProtocolTCP,
TargetPort: 80,
PublishedPort: 0,
},
},
},
},
Endpoint: &api.Endpoint{
Ports: []*api.PortConfig{
{
Protocol: api.ProtocolTCP,
TargetPort: 80,
PublishedPort: 5000,
},
{
Protocol: api.ProtocolTCP,
TargetPort: 80,
PublishedPort: 5001,
},
{
Protocol: api.ProtocolTCP,
TargetPort: 80,
PublishedPort: 30000,
},
{
Protocol: api.ProtocolTCP,
TargetPort: 80,
PublishedPort: 30001,
},
},
},
},
expect: true,
},
}

for _, singleTest := range testCases {