Skip to content

Commit

Permalink
Keras: support the 'Flatten' layer
Browse files Browse the repository at this point in the history
  • Loading branch information
yuanzexi committed Jul 2, 2021
1 parent 038f393 commit d28e5da
Show file tree
Hide file tree
Showing 2 changed files with 91 additions and 60 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ class TLayerDescCreator<TrtShuffleDesc> : public ILayerDescCreator {
public:
bool Check(const Layer& layer) override {
  // This creator handles the Keras layer types that map onto a TRT shuffle:
  // "Permute" and "Flatten".
  const std::string layer_type = layer.Type();
  if (layer_type == "Permute") {
    return true;
  }
  return layer_type == "Flatten";
}

std::shared_ptr<TrtLayerDesc> Create(const Layer& layer, const H5ModelReader& reader,
Expand All @@ -54,9 +54,28 @@ class TLayerDescCreator<TrtShuffleDesc> : public ILayerDescCreator {
return CreatePermute(layer, input_names);
}

if (name == "Flatten") {
return CreateFlatten(layer, input_names);
}

return nullptr;
}

private:
/// \brief Build a TRT shuffle descriptor implementing Keras "Flatten":
/// keep the batch dimension and collapse all remaining dimensions into one.
/// \param layer       the Keras layer being converted
/// \param input_names out-parameter filled with the layer's input tensor names
/// \return shuffle layer descriptor, never nullptr (T_CHECK_EQ aborts on
///         unsupported dtype)
std::shared_ptr<TrtLayerDesc> CreateFlatten(const Layer& layer,
                                            std::vector<std::string>& input_names) const {
  input_names = layer.Inputs();

  // config: only float32 models are supported here
  const std::string dtype = layer.GetAttr<std::string>("dtype");
  T_CHECK_EQ(dtype, "float32");

  auto layer_desc = std::make_shared<TrtShuffleDesc>();
  // In TRT reshape dims, 0 means "copy the corresponding input dim" (batch)
  // and -1 means "infer this dim", i.e. the product of all remaining dims.
  layer_desc->reshapeDimensions = nvinfer1::Dims2{0, -1};
  return layer_desc;
}

std::shared_ptr<TrtLayerDesc> CreatePermute(const Layer& layer,
std::vector<std::string>& input_names) const {
input_names = layer.Inputs();
Expand Down
130 changes: 71 additions & 59 deletions source/unit_test/test_keras_nodes.h
Original file line number Diff line number Diff line change
Expand Up @@ -48,15 +48,15 @@ class TestKerasNodes : public ::testing::Test {
std::vector<std::pair<std::string, TF_Tensor*>> input_map;
};

TEST_F(TestKerasNodes, Softmax) {
pb_path = pb_path + "softmax.pb";
keras_h5_path = keras_h5_path + "softmax.h5";
TEST_F(TestKerasNodes, AvgPool) {
pb_path = pb_path + "average_pooling.pb";
keras_h5_path = keras_h5_path + "average_pooling.h5";

const int batch_size = 1;
const auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 12, 24, 3});
const auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 29, 17, 3});

input_map.push_back({"input_11", input.get()});
output_names = {"softmax/Softmax"};
input_map.push_back({"input", input.get()});
output_names = {"average_pooling2d/AvgPool", "average_pooling2d_1/AvgPool"};
TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

Expand All @@ -73,18 +73,6 @@ TEST_F(TestKerasNodes, Activation) {
TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, BatchNorm) {
  // Compare TF and TRT inference for a Keras BatchNormalization model.
  pb_path += "batch_norm.pb";
  keras_h5_path += "batch_norm.h5";

  const int batch_size = 1;
  // NHWC random input: 24x24 spatial, 3 channels.
  const auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 24, 24, 3});
  input_map.emplace_back("input_1", input.get());

  output_names = {"batch_normalization/FusedBatchNormV3"};
  TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, Arithmetic) {
pb_path = pb_path + "arithmetic.pb";
keras_h5_path = keras_h5_path + "arithmetic.h5";
Expand All @@ -99,6 +87,18 @@ TEST_F(TestKerasNodes, Arithmetic) {
TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, BatchNorm) {
  // End-to-end check of Keras BatchNormalization against the TRT engine.
  const std::string model_name = "batch_norm";
  pb_path = pb_path + model_name + ".pb";
  keras_h5_path = keras_h5_path + model_name + ".h5";

  const int batch_size = 1;
  const auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 24, 24, 3});

  input_map.push_back({"input_1", input.get()});
  output_names = {"batch_normalization/FusedBatchNormV3"};
  TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, Concatenate) {
pb_path = pb_path + "concatenate.pb";
keras_h5_path = keras_h5_path + "concatenate.h5";
Expand Down Expand Up @@ -127,6 +127,18 @@ TEST_F(TestKerasNodes, Convolution) {
TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, Conv2DActivation) {
  // Conv2D with a fused ReLU activation; validate TRT output vs TF.
  pb_path += "conv2d_activation.pb";
  keras_h5_path += "conv2d_activation.h5";

  const int batch_size = 1;
  // Non-square spatial dims (23x29) to catch H/W transposition bugs.
  const auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 23, 29, 3});
  input_map.emplace_back("input_1", input.get());

  output_names = {"conv2d/Relu"};
  TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, Cropping2D) {
pb_path = pb_path + "cropping2d.pb";
keras_h5_path = keras_h5_path + "cropping2d.h5";
Expand All @@ -151,39 +163,27 @@ TEST_F(TestKerasNodes, DepthwiseConv2d) {
TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, SeparableConv2d) {
  // SeparableConv2D (depthwise + pointwise); checks both the biased and
  // bias-free output nodes in one inference pass.
  const std::string model_name = "separable_conv2d";
  pb_path = pb_path + model_name + ".pb";
  keras_h5_path = keras_h5_path + model_name + ".h5";

  const int batch_size = 1;
  const auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 23, 29, 11});

  input_map.push_back({"input_1", input.get()});
  output_names = {"separable_conv2d/BiasAdd", "separable_conv2d_1/separable_conv2d"};
  TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, MaxPool) {
pb_path = pb_path + "max_pooling.pb";
keras_h5_path = keras_h5_path + "max_pooling.h5";
TEST_F(TestKerasNodes, Embedding) {
pb_path = pb_path + "embedding.pb";
keras_h5_path = keras_h5_path + "embedding.h5";

const int batch_size = 1;
const auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 13, 33, 3});
const auto input = fwd::tf_::CreateRandomIntTensor<int>(TF_INT32, {batch_size, 10}, 1000);

input_map.push_back({"input", input.get()});
output_names = {"max_pooling2d/MaxPool", "max_pooling2d_1/MaxPool"};
input_map.push_back({"input1_1", input.get()});
output_names = {"embedding_4/embedding_lookup/Identity_1"};
TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, AvgPool) {
pb_path = pb_path + "average_pooling.pb";
keras_h5_path = keras_h5_path + "average_pooling.h5";
TEST_F(TestKerasNodes, Flatten) {
pb_path = pb_path + "flatten.pb";
keras_h5_path = keras_h5_path + "flatten.h5";

const int batch_size = 1;
const auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 29, 17, 3});
auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 24, 24, 3});

input_map.push_back({"input", input.get()});
output_names = {"average_pooling2d/AvgPool", "average_pooling2d_1/AvgPool"};
input_map.push_back({"input_6", input.get()});
output_names = {"flatten/Reshape"};
TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

Expand All @@ -199,6 +199,18 @@ TEST_F(TestKerasNodes, FullyConnected) {
TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, MaxPool) {
  // MaxPooling2D with two pooling nodes verified in a single run.
  pb_path += "max_pooling.pb";
  keras_h5_path += "max_pooling.h5";

  const int batch_size = 1;
  const auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 13, 33, 3});
  input_map.emplace_back("input", input.get());

  output_names = {"max_pooling2d/MaxPool", "max_pooling2d_1/MaxPool"};
  TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, Permute) {
pb_path = pb_path + "permute.pb";
keras_h5_path = keras_h5_path + "permute.h5";
Expand All @@ -224,40 +236,40 @@ TEST_F(TestKerasNodes, Reduce) {
TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, ZeroPadding) {
pb_path = pb_path + "zero_padding_2d.pb";
keras_h5_path = keras_h5_path + "zero_padding_2d.h5";
TEST_F(TestKerasNodes, Softmax) {
pb_path = pb_path + "softmax.pb";
keras_h5_path = keras_h5_path + "softmax.h5";

const int batch_size = 1;
const auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 12, 24, 3});

input_map.push_back({"input_1", input.get()});
output_names = {"zero_padding2d/Pad", "zero_padding2d_1/Pad", "zero_padding2d_2/Pad",
"zero_padding2d_3/Pad"};
input_map.push_back({"input_11", input.get()});
output_names = {"softmax/Softmax"};
TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, Conv2DActivation) {
pb_path = pb_path + "conv2d_activation.pb";
keras_h5_path = keras_h5_path + "conv2d_activation.h5";
TEST_F(TestKerasNodes, SeparableConv2d) {
pb_path = pb_path + "separable_conv2d.pb";
keras_h5_path = keras_h5_path + "separable_conv2d.h5";

const int batch_size = 1;
const auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 23, 29, 3});
const auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 23, 29, 11});

input_map.push_back({"input_1", input.get()});
output_names = {"conv2d/Relu"};
output_names = {"separable_conv2d/BiasAdd", "separable_conv2d_1/separable_conv2d"};
TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

TEST_F(TestKerasNodes, Embedding) {
pb_path = pb_path + "embedding.pb";
keras_h5_path = keras_h5_path + "embedding.h5";
TEST_F(TestKerasNodes, ZeroPadding) {
pb_path = pb_path + "zero_padding_2d.pb";
keras_h5_path = keras_h5_path + "zero_padding_2d.h5";

const int batch_size = 1;
const auto input = fwd::tf_::CreateRandomIntTensor<int>(TF_INT32, {batch_size, 10}, 1000);
const auto input = fwd::tf_::CreateRandomTensor<float>(TF_FLOAT, {batch_size, 12, 24, 3});

input_map.push_back({"input1_1", input.get()});
output_names = {"embedding_4/embedding_lookup/Identity_1"};
input_map.push_back({"input_1", input.get()});
output_names = {"zero_padding2d/Pad", "zero_padding2d_1/Pad", "zero_padding2d_2/Pad",
"zero_padding2d_3/Pad"};
TestKerasInference(pb_path, keras_h5_path, input_map, output_names, batch_size, threshold);
}

Expand Down

0 comments on commit d28e5da

Please sign in to comment.