Commit e75fb43

dreiss authored and facebook-github-bot committed on Apr 22, 2020
Remove (most) Python 2 support from Python code (pytorch#35615)
Summary:
Pull Request resolved: pytorch#35615

Python 2 has reached end-of-life and is no longer supported by PyTorch. Now we can clean up a lot of cruft that we put in place to support it. These changes were all done manually, and I skipped anything that seemed like it would take more than a few seconds, so I think it makes sense to review it manually as well (though using side-by-side view and ignoring whitespace change might be helpful).

Test Plan: CI

Differential Revision: D20842886

Pulled By: dreiss

fbshipit-source-id: 8cad4e87c45895e7ce3938a88e61157a79504aed
1 parent a894fff commit e75fb43


56 files changed: +145, −646 lines

‎benchmarks/fastrnns/profile.py

+2, −5
@@ -7,8 +7,6 @@
 
 from .runner import get_nn_runners
 
-PY3 = sys.version_info >= (3, 0)
-
 
 def run_rnn(name, rnn_creator, nloops=5,
             seqLength=100, numLayers=1, inputSize=512, hiddenSize=512,
@@ -60,9 +58,8 @@ def system(command):
                          stderr=subprocess.PIPE, shell=True)
     output, err = p.communicate()
     rc = p.returncode
-    if PY3:
-        output = output.decode("ascii")
-        err = err.decode("ascii")
+    output = output.decode("ascii")
+    err = err.decode("ascii")
     return rc, output, err
 
 
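Note: on Python 3, subprocess.Popen.communicate() always returns bytes, so the decode calls no longer need a version guard. Below is a minimal, self-contained sketch of the resulting helper; the Popen call and surrounding lines outside the hunk are assumed from context, not taken verbatim from the file:

    import subprocess

    def system(command):
        # Run a shell command and return (return code, stdout, stderr).
        # communicate() returns bytes on Python 3, so decode unconditionally.
        p = subprocess.Popen(command, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, shell=True)
        output, err = p.communicate()
        rc = p.returncode
        output = output.decode("ascii")
        err = err.decode("ascii")
        return rc, output, err

    rc, out, err = system("echo hello")  # example usage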

‎caffe2/python/ideep/conv_op_test.py

-146
@@ -158,152 +158,6 @@ def test_depthwise_convolution(self, batch_size, gc, dc):
             print(np.max(np.abs(Y2 - Y0)))
             self.assertTrue(False)
 
-    @unittest.skipIf(sys.version_info.major > 2, "broken in python 3")
-    @given(stride=st.integers(1, 3),
-           pad=st.integers(0, 3),
-           kernel=st.integers(3, 5),
-           size=st.integers(8, 10),
-           input_channels=st.integers(1, 3),
-           output_channels=st.integers(1, 5),
-           batch_size=st.integers(1, 3),
-           use_bias=st.booleans(),
-           **mu.gcs)
-    def test_int8_convolution(self, stride, pad, kernel, size,
-                              input_channels, output_channels,
-                              batch_size, use_bias, gc, dc):
-        X = np.random.rand(
-            batch_size, input_channels, size, size).astype(np.float32) - 0.5
-        w = np.random.rand(
-            output_channels, input_channels, kernel, kernel) .astype(np.float32) - 0.5
-        b = np.random.rand(output_channels).astype(np.float32) - 0.5
-
-        old_ws_name = workspace.CurrentWorkspace()
-        workspace.SwitchWorkspace("_device_check_", True)
-        conv_fp32 = core.CreateOperator(
-            "Conv",
-            ["X_fp32", "w_fp32", "b_fp32"] if use_bias else ["X_fp32", "w_fp32"],
-            ["Y_fp32"],
-            stride=stride,
-            pad=pad,
-            kernel=kernel,
-            training_mode=0,
-            device_option=dc[0],
-        )
-        workspace.FeedBlob('X_fp32', X, dc[0])
-        workspace.FeedBlob('w_fp32', w, dc[0])
-        workspace.FeedBlob('b_fp32', b, dc[0])
-        workspace.RunOperatorOnce(conv_fp32)
-        Y = workspace.FetchBlob('Y_fp32')
-
-        workspace.ResetWorkspace()
-
-        Y_absmax = np.array([np.absolute(Y).max()]).astype(np.float32)
-        if Y.min() >= 0:
-            Y_scale = Y_absmax / 0xFF
-            Y_zero_point = 0
-        else:
-            Y_scale = Y_absmax / 0x7F
-            Y_zero_point = 128
-
-        X_absmax = np.array([np.absolute(X).max()]).astype(np.float32)
-        if X.min() >= 0:
-            X_scale = X_absmax / 0xFF
-            X_zero_point = 0
-        else:
-            X_scale = X_absmax / 0x7F
-            X_zero_point = 128
-
-        w_absmax = np.array([np.absolute(w[i, ...]).max() for i in range(w.shape[0])]).astype(np.float32)
-        w_scale = w_absmax / 0x7F
-        w_zero_point = 128
-        w = np.transpose(w, (0, 2, 3, 1)).astype(np.float32)
-        w_bytes = np.rint([w[i, ...] / w_scale[i] for i in range(w.shape[0])]).astype(np.int8) + w_zero_point
-
-        w_filler = core.CreateOperator(
-            "Int8GivenTensorFill",
-            [], ["w"],
-            shape=w.shape,
-            values=w_bytes.astype(np.uint8).tobytes(),
-            Y_zero_point=w_zero_point,
-            Y_scales=w_scale,
-            device_option=dc[1],
-        )
-
-        b_scale = w_scale * X_scale
-        b_zero_point = 0
-        b_bytes = np.rint([b[i] / b_scale[i] for i in range(b.shape[0])]).astype(np.int32)
-        b_filler = core.CreateOperator(
-            "Int8GivenIntTensorFill",
-            [], ["b"],
-            shape=b.shape,
-            values=b_bytes,
-            Y_zero_point=b_zero_point,
-            Y_scales=b_scale,
-            device_option=dc[1],
-        )
-
-        sw2nhwc = core.CreateOperator(
-            "NCHW2NHWC",
-            ["X"],
-            ["X_nhwc"],
-            device_option=dc[1]
-        )
-
-        quantize_X = core.CreateOperator(
-            "Int8Quantize",
-            ["X_nhwc"],
-            ["X_quantized"],
-            engine="DNNLOWP",
-            device_option=dc[1],
-            Y_zero_point=X_zero_point,
-            Y_scale=X_scale[0],
-        )
-
-        conv = core.CreateOperator(
-            "Int8Conv",
-            ["X_quantized", "w", "b"] if use_bias else ["X_quantized", "w"],
-            ["Y_quantized"],
-            stride=stride,
-            pad=pad,
-            kernel=kernel,
-            engine="DNNLOWP",
-            device_option=dc[1],
-            Y_zero_point=Y_zero_point,
-            Y_scale=Y_scale[0],
-        )
-
-        dequantize_Y = core.CreateOperator(
-            "Int8Dequantize",
-            ["Y_quantized"],
-            ["Y_nhwc"],
-            engine="DNNLOWP",
-            device_option=dc[1],
-        )
-
-        sw2nchw = core.CreateOperator(
-            "NHWC2NCHW",
-            ["Y_nhwc"],
-            ["Y_out"],
-            device_option=dc[1]
-        )
-
-        net = caffe2_pb2.NetDef()
-        net.op.extend([w_filler, b_filler, sw2nhwc, quantize_X, conv, dequantize_Y, sw2nchw])
-
-        workspace.FeedBlob("X", X, dc[1])
-        workspace.RunNetOnce(net)
-        Y_out = workspace.FetchBlob("Y_out")
-
-        MSE = np.square(np.subtract(Y, Y_out)).mean()
-        if MSE > 0.005:
-            print(Y.flatten())
-            print(Y_out.flatten())
-            print(np.max(np.abs(Y_out - Y)))
-            print("MSE", MSE)
-            self.assertTrue(False)
-
-        workspace.SwitchWorkspace(old_ws_name)
-
 
 
 if __name__ == "__main__":
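
Note: for readers unfamiliar with the arithmetic in the deleted test, the standalone sketch below reproduces the scale/zero-point rule it applied to X and Y before building the Int8 operators. The helper name quantization_params is illustrative, not part of the test:

    import numpy as np

    def quantization_params(t):
        # Rule used by the deleted test_int8_convolution: non-negative tensors
        # use the full uint8 range (scale = absmax / 255, zero point 0);
        # tensors containing negative values use scale = absmax / 127 and a
        # zero point of 128.
        absmax = np.absolute(t).max()
        if t.min() >= 0:
            return absmax / 0xFF, 0
        return absmax / 0x7F, 128

    X = np.random.rand(2, 3, 8, 8).astype(np.float32) - 0.5  # signed activations
    scale, zero_point = quantization_params(X)
    print(scale, zero_point)  # zero_point is 128 for an input with negatives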
