This repository has been archived by the owner on Sep 25, 2023. It is now read-only.

add nn.Narrow layer
Oleg Poyaganov committed Aug 30, 2017
1 parent 4dea0fa commit e2c54eb
Showing 5 changed files with 47 additions and 4 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -99,6 +99,7 @@ List of Torch7 layers that can be converted into their CoreML equivalent:
 15. Tanh
 16. MulConstant
 17. SpatialZeroPadding
+18. Narrow

 ## License

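For reference on the new entry: in Lua Torch, nn.Narrow(dimension, offset, length) keeps length consecutive entries along one dimension, starting at a 1-based offset. A minimal NumPy sketch of the same idea (0-based indexing, shape chosen purely for illustration):

import numpy as np

x = np.random.ranf((3, 32, 32))   # illustrative CHW tensor, not taken from the repo

# nn.Narrow(1, 1, 1): dimension 1 (the channel axis of a CHW tensor),
# offset 1 (1-based, i.e. NumPy index 0), length 1
y = x[0:1, :, :]
print(y.shape)                    # (1, 32, 32)
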
2 changes: 1 addition & 1 deletion setup.py
@@ -3,7 +3,7 @@
 from os import path


-VERSION = '0.0.2'
+VERSION = '0.0.3'

 here = path.abspath(path.dirname(__file__))

13 changes: 11 additions & 2 deletions test/test_layers.py
@@ -20,14 +20,19 @@ def setUp(self):
         _, model_path = tempfile.mkstemp()
         self.model_path = model_path
         self.input = np.random.ranf(_INPUT_SHAPE)
+        self.torch_batch_mode = True

     def tearDown(self):
         os.remove(self.model_path)

     def _forward_torch(self):
         torch_model = load_lua(self.model_path)
-        input_tensor = torch.from_numpy(np.asarray([self.input])).float()
-        return torch_model.forward(input_tensor).numpy()[0]
+        if self.torch_batch_mode:
+            input_tensor = torch.from_numpy(np.asarray([self.input])).float()
+            return torch_model.forward(input_tensor).numpy()[0]
+        else:
+            input_tensor = torch.from_numpy(self.input).float()
+            return torch_model.forward(input_tensor).numpy()

     def _forward_coreml(self):
         from _torch_converter import convert
@@ -92,3 +92,7 @@ def test_full_convolution(self):

     def test_batch_norm(self):
         self._test_single_layer('nn.SpatialBatchNormalization(3)')
+
+    def test_narrow(self):
+        self.torch_batch_mode = False
+        self._test_single_layer('nn.Narrow(1, 1, 1)')
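A note on the torch_batch_mode switch above: most layer tests forward a batched 4D tensor (np.asarray([self.input])), but nn.Narrow(1, 1, 1) narrows the first Torch dimension, which on batched input would be the batch axis rather than the channel axis, so test_narrow forwards the unbatched 3D input instead. A plain NumPy sketch of the difference (the (3, 64, 64) shape is assumed for illustration, not read from _INPUT_SHAPE):

import numpy as np

x = np.random.ranf((3, 64, 64))   # assumed CHW test input

# Unbatched: Torch dimension 1 (1-based) is the channel axis, so
# Narrow(1, 1, 1) keeps a single channel -- matching CoreML's channel slice.
print(x[0:1].shape)               # (1, 64, 64)

# Batched: the same dimension is now the batch axis, so the layer would
# narrow the batch instead and leave the channels untouched.
xb = x[np.newaxis]                # (1, 3, 64, 64)
print(xb[0:1].shape)              # (1, 3, 64, 64)
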
34 changes: 33 additions & 1 deletion torch2coreml/_layers.py
@@ -354,6 +354,37 @@ def _convert_zero_padding(builder, name, layer, input_names, output_names):
     return output_names


+def _convert_narrow(builder, name, layer, input_names, output_names):
+    dimension = layer.dimension
+    if len(layer.output.numpy().shape) == 4:
+        # the Torch layer ran on a 4D (batched) tensor, so shift to the 3D CHW indexing CoreML uses
+        dimension -= 1
+
+    if dimension == 0:
+        axis = 'channel'
+    elif dimension == 1:
+        axis = 'height'
+    elif dimension == 2:
+        axis = 'width'
+    else:
+        raise ValueError('Only 3d tensors are supported')
+
+    index = layer.index
+    length = layer.length
+
+    builder.add_slice(
+        name=name,
+        axis=axis,
+        start_index=index,
+        end_index=index + length,
+        stride=1,
+        input_name=input_names[0],
+        output_name=output_names[0]
+    )
+
+    return output_names
+
+
 _TORCH_LAYER_REGISTRY = {
     'Sequential': _convert_sequential,
     'SpatialConvolution': _convert_convolution,
@@ -372,7 +403,8 @@ def _convert_zero_padding(builder, name, layer, input_names, output_names):
     'Linear': _convert_linear,
     'Tanh': _convert_tanh,
     'MulConstant': _convert_mul_constant,
-    'SpatialZeroPadding': _convert_zero_padding
+    'SpatialZeroPadding': _convert_zero_padding,
+    'Narrow': _convert_narrow
 }

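To make the mapping in _convert_narrow concrete: after dropping a possible batch dimension, Torch dimensions 0/1/2 become the CoreML slice axes 'channel'/'height'/'width', and Narrow's (index, length) pair becomes a slice from index to index + length with stride 1. A rough NumPy picture of the slice that builder.add_slice sets up (a sketch of the semantics only, not the CoreML implementation):

import numpy as np

_AXIS_TO_DIM = {'channel': 0, 'height': 1, 'width': 2}   # CHW layout

def slice_like_coreml(data, axis, start_index, end_index, stride=1):
    # Take [start_index:end_index:stride] along one of the CHW axes.
    index = [slice(None)] * data.ndim
    index[_AXIS_TO_DIM[axis]] = slice(start_index, end_index, stride)
    return data[tuple(index)]

x = np.random.ranf((3, 32, 32))                  # illustrative CHW tensor
y = slice_like_coreml(x, 'height', 4, 4 + 8)     # Narrow on height: index=4, length=8
print(y.shape)                                   # (3, 8, 32)
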
1 change: 1 addition & 0 deletions torch2coreml/_torch_converter.py
@@ -24,6 +24,7 @@ def _infer_torch_output_shape(torch_model, input_shape):
         output_shape = torch_model.forward(input_tensor).numpy().shape
         return output_shape
     except:
+        # try batch mode
         input_tensor = torch.rand(1, *input_shape).float()
         output_shape = torch_model.forward(input_tensor).numpy().shape[1:]
         return output_shape
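The comment added above marks a fallback in shape inference: _infer_torch_output_shape first forwards an unbatched random tensor and, if the model rejects it, retries with a leading batch dimension of 1 and strips that dimension from the resulting shape. A self-contained NumPy sketch of the pattern (infer_output_shape and strict_4d_forward are hypothetical names, not part of the package):

import numpy as np

def infer_output_shape(forward, input_shape):
    # Try the raw input shape first; fall back to a batch of 1 and drop
    # the batch dimension from the inferred output shape.
    try:
        return forward(np.random.ranf(input_shape)).shape
    except Exception:
        return forward(np.random.ranf((1,) + tuple(input_shape))).shape[1:]

def strict_4d_forward(t):
    # Stand-in for a layer that only accepts batched NCHW input.
    if t.ndim != 4:
        raise ValueError('expected a batched NCHW tensor')
    return t[:, 0:1]                              # keep a single channel

print(infer_output_shape(strict_4d_forward, (3, 64, 64)))   # (1, 64, 64)
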
