@@ -14,13 +14,13 @@ def get_handler_name(node_kind):


class NodeMapper(object):
-
+
    @classmethod
    def _convert_output_shape(cls, kwargs, node):
        shape = TensorShape()
        dim = shape.dim.add()
        dim.size = -1
-
+
        if len(node.output_shape) > 2:
            for i in node.output_shape[2:]:
                dim = shape.dim.add()
@@ -31,22 +31,22 @@ def _convert_output_shape(cls, kwargs, node):
            dim = shape.dim.add()
            dim.size = node.output_shape[1]
        kwargs['_output_shapes'] = [shape]
-
+
    @classmethod
    def get_kernel_params(cls, node):
-        kwargs = {}
+        kwargs = {}
        if node.kernel_parameters.p_h > 0 or node.kernel_parameters.p_w > 0:
-            padding = [0, 0, node.kernel_parameters.p_h, node.kernel_parameters.p_h, node.kernel_parameters.p_w, node.kernel_parameters.p_w, 0, 0]
+            padding = [0, node.kernel_parameters.p_h, node.kernel_parameters.p_w, 0, 0, node.kernel_parameters.p_h, node.kernel_parameters.p_w, 0]
        elif node.kernel_parameters.s_h > 1 or node.kernel_parameters.s_w > 1:
-            padding = [0, 0, (node.kernel_parameters.s_h - 1) // 2, node.kernel_parameters.s_h // 2, (node.kernel_parameters.s_w - 1) // 2, node.kernel_parameters.s_w // 2, 0, 0]
+            padding = [0, (node.kernel_parameters.s_h - 1) // 2, (node.kernel_parameters.s_w - 1) // 2, 0, 0, node.kernel_parameters.s_h // 2, node.kernel_parameters.s_w // 2, 0]
        else:
            padding = None
-
-        kwargs['padding'] = 'VALID'
+
+        kwargs['auto_pad'] = 'VALID'
        kwargs['strides'] = [1, node.kernel_parameters.s_h, node.kernel_parameters.s_w, 1]
        cls._convert_output_shape(kwargs, node)
-
-        return kwargs, {'paddings': padding, 'mode': 'CONSTANT', 'constant_values': 0.0}
+
+        return kwargs, {'pads': padding, 'mode': 'constant', 'constant_values': 0.0}


    @classmethod
@@ -60,7 +60,7 @@ def map_data(cls, node):
            dim.size = i
        dim = shape.dim.add()
        dim.size = node.output_shape.channels
-
+
        kwargs = {'shape': shape}    # Ignore the dimension of batch size
        cls._convert_output_shape(kwargs, node)
        return Node.create('DataInput', **kwargs)
@@ -74,29 +74,30 @@ def map_input(cls, node):
    def map_convolution(cls, node):
        kwargs, padding = cls.get_kernel_params(node)
        parent, _ = node.get_only_parent()
-        kwargs['filter'] = [node.kernel_parameters.k_h, node.kernel_parameters.k_w, parent.output_shape.channels, node.parameters.num_output]
-        kwargs['use_bias'] = node.parameters.bias_term
+        kwargs['kernel_shape'] = [node.kernel_parameters.k_h, node.kernel_parameters.k_w, parent.output_shape.channels, node.parameters.num_output]
+        kwargs['use_bias'] = node.parameters.bias_term
        group = node.parameters.group
        if group != 1:
            kwargs['group'] = group
-
-        if padding['paddings'] != None:
-            return [Node.create('Pad', **padding), Node.create('Convolution', **kwargs)]
+
+        if padding['pads'] != None:
+            return [Node.create('Pad', **padding), Node.create('Conv', **kwargs)]
        else:
-            return Node.create('Convolution', **kwargs)
+            kwargs['pads'] = [0] * 8
+            return Node.create('Conv', **kwargs)


    @classmethod
    def map_deconvolution(cls, node):
        raise NotImplementedError()
        kwargs = cls.get_kernel_params(node)
        parent, _ = node.get_only_parent()
-        kwargs['filter'] = [node.kernel_parameters.k_h, node.kernel_parameters.k_w, parent.output_shape.channels, node.parameters.num_output]
+        kwargs['kernel_shape'] = [node.kernel_parameters.k_h, node.kernel_parameters.k_w, parent.output_shape.channels, node.parameters.num_output]
        group = node.parameters.group
        if group != 1:
-            kwargs['group'] = group
+            kwargs['group'] = group
        return Node.create('deconv', **kwargs)
-
+
    @classmethod
    def map_crop(cls, node):
        offset = node.parameters.offset
@@ -105,13 +106,13 @@ def map_crop(cls, node):
            return Node.create('crop', **kwargs)
        else:
            return Node.create('crop')
-
+
    @classmethod
    def map_relu(cls, node):
        kwargs = {}
        cls._convert_output_shape(kwargs, node)
        return Node.create('Relu', **kwargs)
-
+
    @classmethod
    def map_pooling(cls, node):
        kwargs, padding = cls.get_kernel_params(node)
@@ -122,21 +123,22 @@ def map_pooling(cls, node):
        else:
            # Stochastic pooling, for instance.
            raise ConversionError('Unsupported pooling type.')
-        kwargs['window_shape'] = [1, node.kernel_parameters.k_h, node.kernel_parameters.k_w, 1]
+        kwargs['kernel_shape'] = [1, node.kernel_parameters.k_h, node.kernel_parameters.k_w, 1]
        cls._convert_output_shape(kwargs, node)
-
-        if padding['paddings'] != None:
+
+        if padding['pads'] != None:
            return [Node.create('Pad', **padding), Node.create('Pool', **kwargs)]
        else:
+            kwargs['pads'] = [0] * 8
            return Node.create('Pool', **kwargs)
-
+

    @classmethod
    def _add_flatten_layer(cls, node):
        shape = TensorShape()
        dim = shape.dim.add()
-        dim.size = -1
-
+        dim.size = -1
+
        dim = shape.dim.add()
        dim.size = 1
        for i in node.output_shape[1:]:
@@ -149,40 +151,40 @@ def map_inner_product(cls, node):
        #TODO: Axis
        assert node.parameters.axis == 1
        #TODO: Unbiased
-        kwargs = {'use_bias': node.parameters.bias_term, 'units': node.parameters.num_output}
-
+        kwargs = {'use_bias': node.parameters.bias_term, 'units': node.parameters.num_output}
+
        # check if need the Flatten layer
        parent, _ = node.get_only_parent()
        ret = []
        if parent.output_shape.height > 1 or parent.output_shape.width > 1:
            ret.append(cls._add_flatten_layer(parent))
        ret.append(Node.create('FullyConnected', **kwargs))
        return ret
-
+
    @classmethod
    def map_softmax(cls, node):
        return Node.create('Softmax')
-
+
    @classmethod
    def map_lrn(cls, node):
        params = node.parameters
        assert params.local_size % 2 == 1
        kwargs = {'size': int((params.local_size + 1) / 2), 'alpha': params.alpha, 'beta': params.beta, 'k': params.k}
        cls._convert_output_shape(kwargs, node)
        return Node.create('LRN', **kwargs)
-
+
    @classmethod
    def map_concat(cls, node):
        kwargs = {'axis': (2, 3, 1, 0)[node.parameters.axis]}
        cls._convert_output_shape(kwargs, node)
        return Node.create('Concat', **kwargs)
-
+
    @classmethod
    def map_dropout(cls, node):
        kwargs = {'keep_prob': node.parameters.dropout_ratio}
        cls._convert_output_shape(kwargs, node)
        return Node.create('Dropout', **kwargs)
-
+
    @classmethod
    def map_batch_norm(cls, node):
        scale_offset = len(node.data) == 4
@@ -191,12 +193,12 @@ def map_batch_norm(cls, node):
        kwargs['epsilon'] = epsilon
        cls._convert_output_shape(kwargs, node)
        return Node.create('batch_normalization', **kwargs)
-
+
    @classmethod
    def map_eltwise(cls, node):
        operations = {0: 'mul', 1: 'sum', 2: 'max'}
        op_code = node.parameters.operation
-        try:
+        try:
            return Node.create(operations[op_code])
        except KeyError:
            raise ConversionError('Unknown elementwise operation: {}'.format(op_code))
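
Note on the padding changes in get_kernel_params: both rewritten branches follow the same reordering. The old list reads as a flattened per-dimension [begin, end] sequence over N, H, W, C ([n0, n1, h0, h1, w0, w1, c0, c1]); the new list puts all begin values first and all end values last ([n0, h0, w0, c0, n1, h1, w1, c1]), matching the renamed 'pads' key. The added kwargs['pads'] = [0] * 8 in the no-padding branches keeps that attribute present on 'Conv'/'Pool' nodes even when no 'Pad' node is emitted. A minimal sketch of the reordering, assuming the NHWC interpretation above (the helper name and example values are illustrative, not part of this patch):

def nhwc_paddings_to_pads(paddings):
    # Reorder [n0, n1, h0, h1, w0, w1, c0, c1] into [n0, h0, w0, c0, n1, h1, w1, c1].
    begins = paddings[0::2]   # begin value of each dimension
    ends = paddings[1::2]     # end value of each dimension
    return begins + ends

# Example with p_h = 1, p_w = 2:
# nhwc_paddings_to_pads([0, 0, 1, 1, 2, 2, 0, 0]) == [0, 1, 2, 0, 0, 1, 2, 0]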