[static code gen] add error msg in composite maker code gen #51211

Merged: 11 commits, Mar 10, 2023
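In brief: the static code generator's composite grad op maker now raises an explicit `Unimplemented` error when an attribute arrives as a runtime tensor, the `GetOptionalMulti*` helpers in `composite_grad_desc_maker.h` change their return type from `std::vector<paddle::optional<paddle::Tensor>>` to `paddle::optional<std::vector<paddle::Tensor>>`, and the affected tests are updated to match.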
23 changes: 23 additions & 0 deletions paddle/fluid/operators/generator/templates/operator_utils.c.j2
@@ -624,6 +624,7 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
void Apply() override {
//get inputs
{{construct_composite_input(inputs, fluid_inputs, forward_fluid_inputs, forward_fluid_outputs, input_dict)}}
+ {{construct_composite_tensor_attr(attrs, fluid_attrs, attr_dict, op_name)}}
//get attr
{{construct_composite_attr(attrs, fluid_attrs, attr_dict)}}
//get output
@@ -691,6 +692,28 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
{%- endfor %}
{%- endmacro %}

+ {% macro construct_composite_tensor_attr(attrs, fluid_attrs, attr_dict, op_name) %}
+ {% set attrs_length = attrs | length %}
+ {% for i in range(attrs_length) %}
+ {% if "tensor_name" in attr_dict[attrs[i]] %}
+ auto {{'tensor_' + attrs[i]}} = this->GetOptionalSingleForwardInput("{{attr_dict[attrs[i]]['tensor_name']}}");
+ if ({{'tensor_' + attrs[i]}}) {
+ PADDLE_THROW(platform::errors::Unimplemented(
+ "We don't support dynamic tensor attribute {{attr_dict[attrs[i]]['tensor_name']}} for {{op_name}} composite "
+ "for now."));
+ }
+ {%- endif %}
+ {% if "tensors_name" in attr_dict[attrs[i]] %}
+ auto {{'tensors_' + attrs[i]}} = this->GetOptionalMultiForwardInput("{{attr_dict[attrs[i]]['tensors_name']}}");
+ if ({{'tensors_' + attrs[i]}}) {
+ PADDLE_THROW(platform::errors::Unimplemented(
+ "We don't support dynamic tensors attribute {{attr_dict[attrs[i]]['tensors_name']}} for {{op_name}} composite "
+ "for now."));
+ }
+ {%- endif %}
+ {%- endfor %}
+ {%- endmacro %}

{% macro construct_composite_attr(attrs, fluid_attrs, attr_dict) %}
{% set attrs_length = attrs | length %}
{% for i in range(attrs_length) %}
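To make the macro's output concrete: for a hypothetical op `expand` whose `shape` attribute is declared with `tensor_name: Shape` in the op YAML, the rendered grad op maker would contain roughly the following guard. This is a sketch of generated code, not copied from the repository; the variable and input names depend on the op definition.

```cpp
// Sketch: what construct_composite_tensor_attr renders for a hypothetical
// "expand" op whose "shape" attribute has tensor_name "Shape".
// A composite rule needs the attribute's value while the graph is being
// built, so an attribute delivered as a runtime tensor cannot be handled
// yet; the generated code fails loudly instead of building a wrong graph.
auto tensor_shape = this->GetOptionalSingleForwardInput("Shape");
if (tensor_shape) {
  PADDLE_THROW(platform::errors::Unimplemented(
      "We don't support dynamic tensor attribute Shape for expand composite "
      "for now."));
}
```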
15 changes: 7 additions & 8 deletions paddle/fluid/prim/tests/test_static_prim.cc
@@ -282,8 +282,11 @@ TEST(StaticCompositeGradMaker, TestMutiInputMethod) {
grad_sub_block);
test();
std::vector<paddle::Tensor> muti_fw_input = test.GetMultiForwardInput("X");
- std::vector<paddle::optional<paddle::Tensor>> opt_muti_fw_input =
+ paddle::optional<std::vector<paddle::Tensor>> opt_muti_fw_input =
test.GetOptionalMultiForwardInput("X");
+ std::vector<paddle::Tensor> opt_inner = opt_muti_fw_input.is_initialized()
+ ? opt_muti_fw_input.get()
+ : std::vector<paddle::Tensor>{};
paddle::Tensor fw_out = test.GetSingleForwardOutput("Out");
paddle::Tensor* fw_out_ptr = test.GetOutputPtr(&fw_out);
std::string fw_out_name = test.GetOutputName(fw_out);
@@ -295,14 +298,10 @@
ASSERT_EQ(
static_cast<prim::DescTensor*>(muti_fw_input[1].impl().get())->Name(),
"x1");
- ASSERT_EQ(opt_muti_fw_input.size(), static_cast<std::size_t>(2));
- ASSERT_EQ(static_cast<prim::DescTensor*>(
- opt_muti_fw_input[0].get_ptr()->impl().get())
- ->Name(),
+ ASSERT_EQ(opt_inner.size(), static_cast<std::size_t>(2));
+ ASSERT_EQ(static_cast<prim::DescTensor*>(opt_inner[0].impl().get())->Name(),
"x0");
- ASSERT_EQ(static_cast<prim::DescTensor*>(
- opt_muti_fw_input[1].get_ptr()->impl().get())
- ->Name(),
+ ASSERT_EQ(static_cast<prim::DescTensor*>(opt_inner[1].impl().get())->Name(),
"x1");
ASSERT_EQ(&fw_out, fw_out_ptr);
ASSERT_EQ(fw_out_name, "out");
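The unwrap pattern introduced in this test is the contract every caller of the new API follows: check the optional once, then work with a plain vector. A minimal sketch, assuming only the `is_initialized()`/`get()` accessors of `paddle::optional` that the test itself uses; `maker` stands for any `CompositeGradOpMakerBase` instance and is hypothetical here:

```cpp
// Sketch: consuming the new optional-of-vector return type.
paddle::optional<std::vector<paddle::Tensor>> maybe_xs =
    maker.GetOptionalMultiForwardInput("X");
// Absence is now signaled for the whole input slot at once, not per
// element, so a single check suffices before indexing.
std::vector<paddle::Tensor> xs = maybe_xs.is_initialized()
                                     ? maybe_xs.get()
                                     : std::vector<paddle::Tensor>{};
```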
49 changes: 31 additions & 18 deletions paddle/fluid/prim/utils/static/composite_grad_desc_maker.h
@@ -202,58 +202,71 @@ class CompositeGradOpMakerBase {
return inputs_grads;
}

- std::vector<paddle::optional<paddle::Tensor>> GetOptionalMultiForwardOutput(
+ paddle::optional<std::vector<paddle::Tensor>> GetOptionalMultiForwardOutput(
const std::string& name) {
- std::vector<paddle::optional<paddle::Tensor>> outputs_opt;
+ paddle::optional<std::vector<paddle::Tensor>> outputs_opt;
std::vector<framework::VarDesc*> outputs_descs =
this->MultiForwardOutput(name);
- outputs_opt.reserve(outputs_descs.size());
+ if ((outputs_descs.empty())) {
+ return outputs_opt;
+ }
+ std::vector<paddle::Tensor> outputs;
+ outputs.reserve(outputs_descs.size());
for (const auto& output_desc : outputs_descs) {
if (output_desc) {
- outputs_opt.emplace_back(paddle::make_optional<paddle::Tensor>(
+ outputs.emplace_back(paddle::Tensor(
paddle::Tensor(std::make_shared<DescTensor>(output_desc))));
} else {
- outputs_opt.emplace_back(
- paddle::make_optional<paddle::Tensor>(paddle::Tensor()));
+ outputs.emplace_back(paddle::Tensor(paddle::Tensor()));
}
}
+ outputs_opt = paddle::make_optional<std::vector<paddle::Tensor>>(outputs);
return outputs_opt;
}

- std::vector<paddle::optional<paddle::Tensor>> GetOptionalMultiForwardInput(
+ paddle::optional<std::vector<paddle::Tensor>> GetOptionalMultiForwardInput(
const std::string& name) {
- std::vector<paddle::optional<paddle::Tensor>> inputs_opt;
+ paddle::optional<std::vector<paddle::Tensor>> inputs_opt;
std::vector<framework::VarDesc*> inputs_descs =
this->MultiForwardInput(name);
- inputs_opt.reserve(inputs_descs.size());
+ if ((inputs_descs.empty())) {
+ return inputs_opt;
+ }
+ std::vector<paddle::Tensor> inputs;
+ inputs.reserve(inputs_descs.size());
for (const auto& input_desc : inputs_descs) {
if (input_desc) {
- inputs_opt.emplace_back(paddle::make_optional<paddle::Tensor>(
+ inputs.emplace_back(paddle::Tensor(
paddle::Tensor(std::make_shared<DescTensor>(input_desc))));
} else {
- inputs_opt.emplace_back(
- paddle::make_optional<paddle::Tensor>(paddle::Tensor()));
+ inputs.emplace_back(paddle::Tensor(paddle::Tensor()));
}
}
+ inputs_opt = paddle::make_optional<std::vector<paddle::Tensor>>(inputs);
return inputs_opt;
}

- std::vector<paddle::optional<paddle::Tensor>> GetOptionalMultiOutputGrad(
+ paddle::optional<std::vector<paddle::Tensor>> GetOptionalMultiOutputGrad(
const std::string& name) {
- std::vector<paddle::optional<paddle::Tensor>> outputs_grads;
+ paddle::optional<std::vector<paddle::Tensor>> outputs_grads_opt;
std::vector<framework::VarDesc*> outputs_grads_descs =
this->MultiOutputGrad(name);
+ if ((outputs_grads_descs.empty())) {
+ return outputs_grads_opt;
+ }
+ std::vector<paddle::Tensor> outputs_grads;
outputs_grads.reserve(outputs_grads_descs.size());
for (const auto& output_grad_desc : outputs_grads_descs) {
if (output_grad_desc) {
- outputs_grads.emplace_back(paddle::make_optional<paddle::Tensor>(
+ outputs_grads.emplace_back(paddle::Tensor(
paddle::Tensor(std::make_shared<DescTensor>(output_grad_desc))));
} else {
- outputs_grads.emplace_back(
- paddle::make_optional<paddle::Tensor>(paddle::Tensor()));
+ outputs_grads.emplace_back(paddle::Tensor(paddle::Tensor()));
}
}
- return outputs_grads;
+ outputs_grads_opt =
+ paddle::make_optional<std::vector<paddle::Tensor>>(outputs_grads);
+ return outputs_grads_opt;
}

paddle::Tensor* GetOutputPtr(paddle::Tensor* input) {
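The signature change above is the heart of this file: absence used to be encoded per element (`std::vector<paddle::optional<paddle::Tensor>>`) and is now encoded for the input slot as a whole (`paddle::optional<std::vector<paddle::Tensor>>`), with an empty descriptor list mapping to an uninitialized optional. The control flow of the rewritten helpers can be sketched with standard-library stand-ins; this is an illustration, not Paddle's real types, and `std::optional` plays the role of `paddle::optional`:

```cpp
#include <memory>
#include <optional>
#include <vector>

struct VarDesc {};  // stand-in for framework::VarDesc
struct Tensor {     // stand-in for paddle::Tensor
  Tensor() = default;
  explicit Tensor(std::shared_ptr<VarDesc> desc) : desc_(std::move(desc)) {}
  std::shared_ptr<VarDesc> desc_;
};

// Sketch of the new control flow shared by the three GetOptionalMulti*
// helpers: an empty descriptor list means the whole slot is absent, so
// return an empty optional; otherwise build the full vector (a default
// Tensor for null descriptors) and wrap it once at the end.
std::optional<std::vector<Tensor>> GetOptionalMulti(
    const std::vector<VarDesc*>& descs) {
  if (descs.empty()) return std::nullopt;
  std::vector<Tensor> tensors;
  tensors.reserve(descs.size());
  for (VarDesc* d : descs) {
    tensors.emplace_back(d ? Tensor(std::make_shared<VarDesc>(*d)) : Tensor());
  }
  return tensors;
}
```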
5 changes: 4 additions & 1 deletion python/paddle/fluid/tests/unittests/op_test.py
@@ -970,7 +970,10 @@ def cal_python_api(python_api, args, kernel_sig):
% self.op_type
)
args = OpTestUtils.prepare_python_api_arguments(
- self.python_api, eager_tensor_inputs, attrs_outputs, kernel_sig
+ self.python_api,
+ eager_tensor_inputs,
+ attrs_outputs,
+ kernel_sig,
)
""" we directly return the cal_python_api value because the value is already tensor.
"""
57 changes: 45 additions & 12 deletions python/paddle/fluid/tests/unittests/prim_op_test.py
@@ -78,7 +78,11 @@ def _get_kernel_signature(

@classmethod
def prepare_python_api_arguments(
- cls, api, op_proto_ins, op_proto_attrs, kernel_sig
+ cls,
+ api,
+ op_proto_ins,
+ op_proto_attrs,
+ kernel_sig,
):
"""map from `op proto inputs and attrs` to `api input list and api attrs dict`

@@ -100,7 +104,7 @@ def get_default(idx, defaults):
def to_defaults_list(params, defaults):
return [defaults[p] for p in params if p in defaults]

- def parse_attri_value(name, op_inputs, op_attrs):
+ def parse_attri_value(name, op_inputs, op_proto_attrs):
"""parse true value from inputs and attrs, if there is no name passed by OpTest, return Empty
1. if the name in op_attrs, use the op_attrs[name]
2. if the name in op_inputs, convert the op_inputs to [type of default value]
@@ -155,6 +159,10 @@ def parse_attri_value(name, op_inputs, op_attrs):
for name in attrs_sig
]
results = []
+ # hack: support variable-length parameters (such as paddle.meshgrid(*args, **kwargs))
+ if api_params == []:
+     results.append(input_arguments)
+     return results
api_ignore_param_list = set(['name', 'dtype', 'out', 'output'])
idx_of_op_proto_arguments = 0
for idx, arg_name in enumerate(api_params):
Expand All @@ -178,6 +186,7 @@ def parse_attri_value(name, op_inputs, op_attrs):
def assumption_assert_and_transform(cls, args, inp_num):
"""
transform inputs by the following rules:
+ Note: a list holding a single Tensor cannot be distinguished from a bare Tensor; use a wrapper to make the distinction explicit.
1. [Tensor] -> Tensor
2. [Tensor, Tensor, ...] -> list of Tensors
3. None -> None
@@ -374,7 +383,7 @@ def init_checker_threshold(self):

def check(self):
if (
- self.place is paddle.fluid.libpaddle.CUDAPlace
+ type(self.place) is paddle.fluid.libpaddle.CUDAPlace
and not paddle.is_compiled_with_cuda()
):
return
@@ -420,7 +429,10 @@ def get_eager_desire(self):
_,
) = self.get_eager_input_attr_and_inputdict()
args = OpTestUtils.prepare_python_api_arguments(
- self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+ self.python_api,
+ eager_tensor_inputs,
+ attrs_outputs,
+ self.kernel_sig,
)
inputs_sig, _, _ = self.kernel_sig
args = OpTestUtils.assumption_assert_and_transform(
@@ -557,7 +569,10 @@ def check_static_comp(self):
feed,
) = self.get_static_input_attr_inputdict_and_feed()
args = OpTestUtils.prepare_python_api_arguments(
- self.python_api, static_inputs, attrs, self.kernel_sig
+ self.python_api,
+ static_inputs,
+ attrs,
+ self.kernel_sig,
)
inputs_sig, _, _ = self.kernel_sig
args = OpTestUtils.assumption_assert_and_transform(
@@ -623,7 +638,10 @@ def check_jit_comp(self):
_,
) = self.get_eager_input_attr_and_inputdict()
args = OpTestUtils.prepare_python_api_arguments(
- self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+ self.python_api,
+ eager_tensor_inputs,
+ attrs_outputs,
+ self.kernel_sig,
)
inputs_sig, _, _ = self.kernel_sig
args = OpTestUtils.assumption_assert_and_transform(
@@ -700,7 +718,10 @@ def check_jit_comp_with_cinn(self):
_,
) = self.get_eager_input_attr_and_inputdict()
args = OpTestUtils.prepare_python_api_arguments(
- self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+ self.python_api,
+ eager_tensor_inputs,
+ attrs_outputs,
+ self.kernel_sig,
)
inputs_sig, _, _ = self.kernel_sig
args = OpTestUtils.assumption_assert_and_transform(
@@ -771,7 +792,7 @@ def init(self):

def check(self):
if (
- self.place is paddle.fluid.libpaddle.CUDAPlace
+ type(self.place) is paddle.fluid.libpaddle.CUDAPlace
and not paddle.is_compiled_with_cuda()
):
return
@@ -856,7 +877,10 @@ def get_eager_desire(self):
inputs_dict,
) = self.get_eager_input_attr_and_inputdict()
args = OpTestUtils.prepare_python_api_arguments(
- self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+ self.python_api,
+ eager_tensor_inputs,
+ attrs_outputs,
+ self.kernel_sig,
)
inputs_sig, _, outputs_sig = self.kernel_sig
args = OpTestUtils.assumption_assert_and_transform(
@@ -956,7 +980,10 @@ def check_static_comp(self):
feed,
) = self.get_static_input_attr_inputdict_and_feed()
args = OpTestUtils.prepare_python_api_arguments(
- self.python_api, static_inputs, attrs, self.kernel_sig
+ self.python_api,
+ static_inputs,
+ attrs,
+ self.kernel_sig,
)
inputs_sig, _, outputs_sig = self.kernel_sig
args = OpTestUtils.assumption_assert_and_transform(
@@ -1057,7 +1084,10 @@ def check_jit_comp(self):
inputs_dict,
) = self.get_eager_input_attr_and_inputdict()
args = OpTestUtils.prepare_python_api_arguments(
- self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+ self.python_api,
+ eager_tensor_inputs,
+ attrs_outputs,
+ self.kernel_sig,
)
inputs_sig, _, outputs_sig = self.kernel_sig
args = OpTestUtils.assumption_assert_and_transform(
@@ -1165,7 +1195,10 @@ def check_jit_comp_with_cinn(self):
inputs_dict,
) = self.get_eager_input_attr_and_inputdict()
args = OpTestUtils.prepare_python_api_arguments(
- self.python_api, eager_tensor_inputs, attrs_outputs, self.kernel_sig
+ self.python_api,
+ eager_tensor_inputs,
+ attrs_outputs,
+ self.kernel_sig,
)
inputs_sig, _, outputs_sig = self.kernel_sig
args = OpTestUtils.assumption_assert_and_transform(
6 changes: 3 additions & 3 deletions python/paddle/fluid/tests/unittests/test_expand_v2_op.py
@@ -43,7 +43,7 @@ def init_data(self):
self.expand_times = [1]

def test_check_output(self):
- self.check_output(check_prim=True)
+ self.check_output()

def test_check_grad(self):
self.check_grad(['X'], 'Out', check_prim=True)
@@ -105,10 +105,10 @@ def init_data(self):
self.infer_expand_shape = [-1]

def test_check_output(self):
- self.check_output(check_prim=True)
+ self.check_output()

def test_check_grad(self):
- self.check_grad(['X'], 'Out', check_prim=True)
+ self.check_grad(['X'], 'Out')


class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpRank1_tensor_attr):
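Note on the test changes above: `check_prim=True` is removed from the tensor-attribute variants of the expand_v2 tests, presumably because the new generator guard raises `Unimplemented` whenever a composite grad maker receives an attribute as a runtime tensor, so prim checking can no longer run for those cases.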