
Commit b61447b

Author: Yibing Liu

Merge pull request #9 from kuke/convert_fit_a_line

Enable loading parameters & converting the fit_a_line model

2 parents (8d816e1 + 262372a), commit b61447b

File tree: 4 files changed (+125, -47 lines)

convert.py

Lines changed: 90 additions & 37 deletions
@@ -15,65 +15,118 @@
 import os
 import argparse
 
-from onnx import helper
+from onnx import helper, checker
 import paddle.fluid as fluid
 
-import ops
-from variables import paddle_variable_to_onnx_tensor
+import fluid_onnx.ops as ops
+from fluid_onnx.variables import paddle_variable_to_onnx_tensor
+from fluid_onnx.variables import PADDLE_TO_ONNX_DTYPE
 
 
-def convert(dirname):
+def parse_args():
+    # Read arguments: path to model.
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--fluid_model", required=True, help="Input PaddlePaddle Fluid model.")
+    parser.add_argument(
+        "--onnx_model", required=False, help="The path to save ONNX model.")
+    args = parser.parse_args()
+    return args
+
+
+def print_arguments(args):
+    print('----------- Configuration Arguments -----------')
+    for arg, value in sorted(vars(args).iteritems()):
+        print('%s: %s' % (arg, value))
+    print('------------------------------------------------')
+
+
+def convert(args):
     # Read the model files.
     place = fluid.CPUPlace()
     exe = fluid.Executor(place)
 
     inference_scope = fluid.core.Scope()
     with fluid.scope_guard(inference_scope):
         [inference_program, feed_target_names,
-         fetch_targets] = fluid.io.load_inference_model(dirname, exe)
+         fetch_targets] = fluid.io.load_inference_model(args.fluid_model, exe)
 
         # Using blocks in programs, create nodes using:
         onnx_nodes = []
-        all_inputs = []
-        for block in inference_program.blocks:
-            all_inputs += [
-                paddle_variable_to_onnx_tensor(v, block) for v in block.vars
-                if v not in ['feed', 'fetch']
-            ]
 
+        # Load parameters
+        global_block = inference_program.global_block()
+        for var_name in global_block.vars:
+            var = global_block.var(var_name)
+            if var_name not in ['feed', 'fetch'] and var.persistable:
+                param = fluid.executor.fetch_var(var_name, inference_scope)
+                param_node = helper.make_node(
+                    'Constant',
+                    inputs=[],
+                    outputs=[var_name],
+                    value=helper.make_tensor(
+                        name=var_name,
+                        dims=var.shape,
+                        data_type=PADDLE_TO_ONNX_DTYPE[var.dtype],
+                        vals=param.flatten().tolist()))
+                onnx_nodes.append(param_node)
+
+        # Create inputs
+        inputs = [
+            paddle_variable_to_onnx_tensor(v, global_block)
+            for v in feed_target_names
+        ]
+
+        # Create outputs
+        fetch_target_names = [
+            fetch_target.name for fetch_target in fetch_targets
+        ]
+        outputs = [
+            paddle_variable_to_onnx_tensor(v, global_block)
+            for v in fetch_target_names
+        ]
+
+        # Create nodes
+        for block in inference_program.blocks:
             for op in block.ops:
-                if op.type in ops.PADDLE_TO_ONNX:
-                    # TODO(varunarora): Attributes.
-                    # TODO(varunarora): Use the modifier function to make the
-                    # transformation.
-                    node_proto = helper.make_node(
-                        ops.PADDLE_TO_ONNX[op.type][0], op.input_arg_names,
-                        op.output_arg_names)
+                if op.type in ops.node_maker:
+                    # TODO(kuke): deal with the corner case that vars in
+                    # different blocks have the same name
+                    node_proto = ops.node_maker[op.type](
+                        inputs=op.input_arg_names,
+                        attrs=op.attr_names,
+                        outputs=op.output_arg_names)
 
                     onnx_nodes.append(node_proto)
                 else:
-                    # Not valid to skip any op, so after all edge cases have
-                    # been accounted for, this exception raising to be
-                    # re-enabled.
-                    # raise NameError(op.type)
-                    pass
+                    if op.type not in ['feed', 'fetch']:
+                        raise NotImplementedError("OP[%s] is not supported in "
+                                                  "the converter!" % op.type)
+
+        # Make graph
+        model_name = os.path.basename(args.fluid_model.strip('/')).split('.')[0]
+        onnx_graph = helper.make_graph(onnx_nodes, model_name, inputs, outputs)
 
-        # Nodes, name of graph, inputs, outputs.
-        if dirname[-1] == '/':
-            dirname = dirname[:-1]
-        graph = helper.make_graph(onnx_nodes,
-                                  os.path.basename(dirname).split('.')[0],
-                                  all_inputs, [])
+        # Make model
+        onnx_model = helper.make_model(onnx_graph, producer_name='PaddlePaddle')
 
-        print graph
+        # Model check
+        checker.check_model(onnx_model)
 
-        # TODO(varunarora): Plug in parameters.
+        # Output readable model
+        print("The converted model is:\n{}".format(onnx_model))
+
+        # Save converted model
+        if args.onnx_model is not None:
+            try:
+                with open(args.onnx_model, 'wb') as f:
+                    f.write(onnx_model.SerializeToString())
+                print("Saved converted model to path: %s" % args.onnx_model)
+            except (IOError), e:
+                print("Invalid ONNX model saving path: %s" % args.onnx_model)
 
 
 if __name__ == "__main__":
-    # Read arguments: path to model.
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "--modeldir", required=True, help="Input PaddlePaddle model")
-    args = parser.parse_args()
-    convert(args.modeldir)
+    args = parse_args()
+    print_arguments(args)
+    convert(args)
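
For orientation, here is a minimal standalone sketch of the parameter-embedding step the new convert.py performs: each persistable Fluid variable becomes an ONNX Constant node carrying its values. The weight array and the variable name 'fc_0.w_0' below are hypothetical stand-ins; only the Constant/make_tensor pattern comes from the diff.

import numpy as np
from onnx import helper, TensorProto

# Hypothetical 2x3 weight, standing in for a parameter fetched from the inference scope.
param = np.arange(6, dtype=np.float32).reshape(2, 3)

# Wrap the raw values in a Constant node whose output name matches the variable name,
# mirroring the loop over persistable vars added in convert.py.
param_node = helper.make_node(
    'Constant',
    inputs=[],
    outputs=['fc_0.w_0'],  # hypothetical Fluid parameter name
    value=helper.make_tensor(
        name='fc_0.w_0',
        data_type=TensorProto.FLOAT,
        dims=param.shape,
        vals=param.flatten().tolist()))
print(param_node)

With the new CLI flags, a run would look roughly like python convert.py --fluid_model <saved inference model dir> --onnx_model <output path>; only --fluid_model is required, and the converted model is checked and printed either way.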

fluid_onnx/__init__.py

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

ops.py renamed to fluid_onnx/ops.py

Lines changed: 9 additions & 7 deletions
@@ -11,6 +11,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+from onnx.helper import make_node
 """
 Priority of ops (uniques) to figure out support for.
 
@@ -53,8 +55,8 @@ def abs_op():
     pass
 
 
-def add_op():
-    pass
+def add_op(inputs, attrs, outputs):
+    return make_node('Add', inputs=inputs, outputs=outputs, broadcast=1)
 
 
 def and_op():
@@ -222,8 +224,8 @@ def lppool_op():
     pass
 
 
-def matmul_op():
-    pass
+def matmul_op(inputs, attrs, outputs):
+    return make_node('MatMul', inputs=inputs, outputs=outputs)
 
 
 def max_op():
@@ -445,10 +447,10 @@ def xor_op():
 # ONNX Ops that use multiple Paddle ops are keyed by '<op1>,<op2>' fed into the
 # modifier.
 
-PADDLE_TO_ONNX = {
+node_maker = {
     # Paddle op name : (ONNX op name, modifier)
     'abs': ('Abs', abs_op),
-    'elementwise_add': ('Add', add_op),
+    'elementwise_add': add_op,
 
     # '': 'And',  # ?
     # 'ArgMax', NEEDS ATTENTION.
@@ -496,7 +498,7 @@ def xor_op():
     '': 'MaxRoiPool',
     'mean': ('Mean', mean_op),
     '': 'Min',
-    'mul': ('Mul', mul_op),
+    'mul': matmul_op,
     ',': 'Neg',
     '': 'Not',
     '': 'Or',
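
To illustrate the new dispatch contract in fluid_onnx/ops.py, here is a condensed, self-contained sketch: each supported Paddle op type now maps to a maker function with an (inputs, attrs, outputs) signature that returns a ready-made ONNX NodeProto. The variable names are hypothetical; add_op and the 'elementwise_add' mapping mirror the diff.

from onnx.helper import make_node

def add_op(inputs, attrs, outputs):
    # Same shape as the maker in fluid_onnx/ops.py: take Fluid arg names, emit an ONNX node.
    return make_node('Add', inputs=inputs, outputs=outputs, broadcast=1)

node_maker = {'elementwise_add': add_op}

# What convert.py now does for each supported op in a block:
node = node_maker['elementwise_add'](
    inputs=['x', 'y'],                      # hypothetical input variable names
    attrs=[],                               # attribute names (unused by add_op)
    outputs=['elementwise_add_0.tmp_0'])    # hypothetical output variable name
print(node)

Returning a finished NodeProto from each maker keeps convert.py op-agnostic: it only looks up node_maker[op.type] and forwards the op's argument names.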

variables.py renamed to fluid_onnx/variables.py

Lines changed: 13 additions & 3 deletions
@@ -12,16 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import numpy as np
 from onnx import helper, onnx_pb2, TensorProto
 import paddle.fluid.core as core
 
 
 def paddle_variable_to_onnx_tensor(paddle_var_name, block):
     # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.
     paddle_var = block.var(paddle_var_name)
-    return helper.make_tensor_value_info(paddle_var_name,
-                                         PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],
-                                         paddle_var.shape)
+    shape = paddle_onnx_shape(paddle_var.shape)
+    return helper.make_tensor_value_info(
+        paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype], shape)
+
+
+def paddle_onnx_shape(paddle_shape):
+    """ Convert shape info from paddle to onnx
+    """
+
+    onnx_shape = np.array(list(paddle_shape))
+    onnx_shape[onnx_shape < 0] = 0
+    return tuple(onnx_shape)
 
 
 PADDLE_TO_ONNX_DTYPE = {
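
The new paddle_onnx_shape helper normalizes Fluid shapes before they reach helper.make_tensor_value_info: Fluid marks an unknown (typically batch) dimension as -1, and the helper rewrites any negative dimension to 0. A standalone copy behaves as below; the (-1, 13) input shape is only an illustrative guess at what a fit_a_line feed variable looks like.

import numpy as np

def paddle_onnx_shape(paddle_shape):
    # Copy of the helper added in fluid_onnx/variables.py: map negative
    # (unknown) dims from Fluid to 0 before building the ONNX value info.
    onnx_shape = np.array(list(paddle_shape))
    onnx_shape[onnx_shape < 0] = 0
    return tuple(onnx_shape)

print(paddle_onnx_shape((-1, 13)))  # -> (0, 13)

This converted shape is what paddle_variable_to_onnx_tensor now passes to helper.make_tensor_value_info for each input and output.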
