Skip to content

Commit

Permalink
feat: changed output format of onnx converter
Browse files Browse the repository at this point in the history
  • Loading branch information
sid-alluri committed Aug 16, 2023
1 parent 1d0b0c7 commit 158f419
Show file tree
Hide file tree
Showing 4 changed files with 151 additions and 64 deletions.
6 changes: 6 additions & 0 deletions python/onnx_converter/dataprinter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# dataprinter.py — small debug helper: load the MNIST model and dump its
# value_info entries (shape annotations for intermediate tensors).
import onnx

# NOTE(review): model path is hard-coded; keep in sync with onnxconverter.py.
onnx_model = onnx.load("mnist-8.onnx")

# value_info holds intermediate-tensor type/shape records; presumably empty
# unless the model carries shape-inference data — verify on this model.
print(onnx_model.graph.value_info)

Binary file modified python/onnx_converter/first_transformed_onnx.msgpack
Binary file not shown.
207 changes: 144 additions & 63 deletions python/onnx_converter/onnxconverter.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,30 +3,16 @@
import numpy as np
import msgpack
from onnx import numpy_helper
import argparse

# NOTE(review): diff residue removed here.  The model load, graph aliases and
# the scale_factor assignment now happen AFTER argument parsing (see the
# "Loading ONNX model" section below); keeping the old copies would load the
# model from disk twice and assign values that are immediately overwritten.

# Helper Functions


def get_shape(container, node_id):
    """Return the tensor shape at ``container[node_id]`` as a list of ints.

    ``container`` is a repeated ValueInfoProto field (e.g. ``graph.input``);
    the shape is read from its ``tensor_type`` WITHOUT mutating the proto.
    The previous implementation ``pop(0)``-ed every dim, destructively
    emptying the shape inside the loaded model, so a second call for the
    same entry returned ``[]``.
    """
    dim = container[node_id].type.tensor_type.shape.dim
    # int() normalises protobuf int64 dim_value to a plain Python int so the
    # result serialises cleanly with msgpack.
    return [int(d.dim_value) for d in dim]


Expand All @@ -41,106 +27,201 @@ def get_output_dim(node_id, graph):


def get_input_dim(layers):
    """Input shape for the next layer = output shape of the last converted layer.

    Diff residue left two return statements here (the second unreachable).
    Both are equivalent for the layer dicts built below, where
    ``layer['out_shapes'] == [layer['output_shapes']]``; the newer
    ``out_shapes`` form is kept.
    """
    return layers[-1]['out_shapes'][0]


def create_wbdim_map(graph):
    """Map each initializer name (weight/bias tensor) to its shape as plain ints.

    Used by the conversion loop to distinguish weight/bias node inputs
    (present in the map) from activation inputs (absent).
    """
    # int() converts protobuf int64 dims so the shapes are msgpack-friendly.
    return {init.name: [int(d) for d in init.dims] for init in graph.initializer}

# CLI flags for the converter.  The model/output paths are currently
# hard-coded below; the commented-out arguments suggest they were meant to be
# configurable — TODO(review): confirm and re-enable.
parser = argparse.ArgumentParser()
# parser.add_argument('--model', type=str, required=True, default="mnist-8.onnx")
# parser.add_argument('--model_output', type=str, required=True)
# parser.add_argument('--config_output', type=str, required=True)
# Fixed-point scaling factor applied to float weights (2**9 == 512).
parser.add_argument('--scale_factor', type=int, default=2**9)
parser.add_argument('--k', type=int, default=19)
parser.add_argument('--eta', type=float, default=0.001)
parser.add_argument('--num_cols', type=int, default=6)
# BooleanOptionalAction adds paired --flag/--no-flag switches (Python >= 3.9).
parser.add_argument('--use_selectors', action=argparse.BooleanOptionalAction, required=False, default=True)
parser.add_argument('--commit', action=argparse.BooleanOptionalAction, required=False, default=False)
parser.add_argument('--expose_output', action=argparse.BooleanOptionalAction, required=False, default=True)
parser.add_argument('--start_layer', type=int, default=0)
parser.add_argument('--end_layer', type=int, default=10000)
parser.add_argument('--num_randoms', type=int, default=20001)
args = parser.parse_args()


# Loading ONNX model

# NOTE(review): input path is hard-coded; see the commented-out --model flag
# above.  The aliases below are read by the conversion loop.
onnx_model = onnx.load("mnist-8.onnx")
scale_factor = args.scale_factor
model_graph = onnx_model.graph
model_input = model_graph.input
model_nodes = model_graph.node
model_init = onnx_model.graph.initializer

# Converting Layers
#
# Walk the graph nodes in order and emit one layer dict per supported op.
# NOTE(review): inp_idxes/out_idxes assume a strictly sequential graph (each
# node feeds the next); the original author flagged them with "RANDOM COME
# BACK HERE" markers — verify before using on branching models.

layers = list()
commit_before = list()
commit_after = list()
wbdim_map = create_wbdim_map(model_graph)


def _node_input_shapes(node, node_id):
    """Shape of every input of *node*.

    Weight/bias inputs are looked up in the initializer map; an activation
    input falls back to the graph input shape (first node) or the previous
    layer's output shape.
    """
    shapes = []
    for name in node.input:
        if name in wbdim_map:
            shapes.append(wbdim_map[name])
        elif node_id == 0:
            shapes.append(get_shape(model_input, 0))
        else:
            shapes.append(get_input_dim(layers))
    return shapes


node_id = 0
for node in model_nodes:
    if node.op_type == "Conv":
        layer_type = "Conv2D"
        # The first two attributes of Conv in this model are kernel_shape and
        # strides; .item() converts protobuf/numpy ints to plain Python ints
        # so msgpack can serialise them.  NOTE: pop() mutates the node proto.
        node_attr = node.attribute
        kernel = [np.int64(v).item() for v in node_attr.pop(0).ints]
        stride = [np.int64(v).item() for v in node_attr.pop(0).ints]
        # padding = str(node_attr.pop(0).s) # TODO
        params = [kernel[0], stride[0]]  # assumes square kernel/stride — TODO confirm
    elif node.op_type == "MaxPool":
        layer_type = "MaxPool2D"
        node_attr = node.attribute
        kernel = [np.int64(v).item() for v in node_attr.pop(0).ints]
        stride = [np.int64(v).item() for v in node_attr.pop(0).ints]
        params = [kernel[0], stride[0]]
    elif node.op_type == "Relu":
        layer_type = "ReLU"
        params = [[]]
    elif node.op_type == "Reshape":
        layer_type = "Reshape"
        params = [[]]
    elif node.op_type == "Gemm":
        layer_type = "Fully Connected Layer"
        params = [[]]
    else:
        # Unsupported op: skip it, but keep node numbering consistent.
        node_id += 1
        continue

    output_id = node_id
    if node_id == 0:
        input_dim = get_shape(model_input, 0)
    else:
        input_dim = get_input_dim(layers)
    inputs_dim = _node_input_shapes(node, node_id)
    output_dim = get_output_dim(node_id, model_graph)

    layer = {
        "layer_type": layer_type,
        "node_id": node_id,
        "input_shapes": input_dim,
        "output_shapes": output_dim,
        "params": params,  # TODO(review): original marked "Change params HELP"
        "inp_shapes": inputs_dim,
        "inp_idxes": [node_id],      # TODO(review): placeholder, see note above
        "out_idxes": [node_id + 1],  # TODO(review): placeholder, see note above
        "out_shapes": [output_dim],
        "mask": []
    }
    layers.append(layer)
    node_id += 1

# Debug: dump every field of the first converted layer, then stop.
for layer in layers:
    for key in layer.keys():
        print(layer[key])
    break
# print(layers)

# Converting W&B
#
# Each initializer tensor is flattened and fixed-point-quantised: floats are
# scaled by scale_factor and rounded to ints; ints pass through unscaled.
init_id = 0
tensors = list()
for init in model_init:
    shape = [np.int64(dim).item() for dim in init.dims]
    # ravel() flattens in row-major (C) order; the original author questioned
    # this ("Orientation of ravel ?????") — verify against the Rust loader.
    raw_data = numpy_helper.to_array(init).ravel().tolist()
    data = []
    for value in raw_data:
        if isinstance(value, float):
            data.append(np.int64(np.round(value * scale_factor)).item())
        elif isinstance(value, int):
            # BUG FIX: the original computed this value but never appended it,
            # silently dropping integer tensors from the output.
            data.append(np.int64(value).item())
        # other element types are skipped
    tensor = {
        "idx": init_id,
        "shape": shape,
        "data": data
    }
    tensors.append(tensor)
    init_id += 1
# print(tensors)


# Converting to msgpack
#
# Assemble the final model description and serialise it.  Diff residue
# (the old scaling_factor/layers/tensors keys interleaved without commas)
# is removed; only the new-format keys are kept.
final_dict = {
    'global_sf': scale_factor,
    'k': args.k,
    'num_cols': args.num_cols,
    'inp_idxes': [0],        # TODO(review): placeholder ("RANDOM COME BACK HERE")
    'out_idxes': [node_id],  # TODO(review): placeholder ("RANDOM COME BACK HERE")
    'layers': layers,
    'tensors': tensors,
    'use_selectors': args.use_selectors,
    'commit_before': commit_before,
    'commit_after': commit_after,
    'bits_per_elem': None,
    'num_random': args.num_randoms,
}

# print(final_dict)

# NOTE(review): output path is hard-coded; see the commented-out
# --model_output flag above.
with open("first_transformed_onnx.msgpack", "wb") as mfile:
    mfile.write(msgpack.packb(final_dict))
2 changes: 1 addition & 1 deletion src/utils/loader.rs
Original file line number Diff line number Diff line change
Expand Up @@ -74,4 +74,4 @@ pub fn load_model_msgpack(config_path: &str, inp_path: &str) -> ModelMsgpack {
};

model
}
}

0 comments on commit 158f419

Please sign in to comment.