@@ -232,7 +232,7 @@ def func(z):
     @pytest.mark.parametrize("x_dtype", (torch.float, torch.half))
     @pytest.mark.parametrize("rois_dtype", (torch.float, torch.half))
     def test_autocast(self, x_dtype, rois_dtype):
-        with torch.cuda.amp.autocast():
+        with torch.amp.autocast("cuda"):
             self.test_forward(torch.device("cuda"), contiguous=False, x_dtype=x_dtype, rois_dtype=rois_dtype)
 
     def _helper_boxes_shape(self, func):
@@ -497,7 +497,7 @@ def test_forward(self, device, contiguous, deterministic, aligned, x_dtype, rois
     @pytest.mark.parametrize("rois_dtype", (torch.float, torch.half))
     @pytest.mark.opcheck_only_one()
     def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
-        with torch.cuda.amp.autocast():
+        with torch.amp.autocast("cuda"):
             self.test_forward(
                 torch.device("cuda"),
                 contiguous=False,
@@ -513,7 +513,7 @@ def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
     @pytest.mark.parametrize("x_dtype", (torch.float, torch.bfloat16))
     @pytest.mark.parametrize("rois_dtype", (torch.float, torch.bfloat16))
     def test_autocast_cpu(self, aligned, deterministic, x_dtype, rois_dtype):
-        with torch.cpu.amp.autocast():
+        with torch.amp.autocast("cpu"):
             self.test_forward(
                 torch.device("cpu"),
                 contiguous=False,
@@ -856,14 +856,14 @@ def test_nms_gpu(self, iou, device, dtype=torch.float64):
     @pytest.mark.parametrize("dtype", (torch.float, torch.half))
     @pytest.mark.opcheck_only_one()
     def test_autocast(self, iou, dtype):
-        with torch.cuda.amp.autocast():
+        with torch.amp.autocast("cuda"):
             self.test_nms_gpu(iou=iou, dtype=dtype, device="cuda")
 
     @pytest.mark.parametrize("iou", (0.2, 0.5, 0.8))
     @pytest.mark.parametrize("dtype", (torch.float, torch.bfloat16))
     def test_autocast_cpu(self, iou, dtype):
         boxes, scores = self._create_tensors_with_iou(1000, iou)
-        with torch.cpu.amp.autocast():
+        with torch.amp.autocast("cpu"):
             keep_ref_float = ops.nms(boxes.to(dtype).float(), scores.to(dtype).float(), iou)
             keep_dtype = ops.nms(boxes.to(dtype), scores.to(dtype), iou)
         torch.testing.assert_close(keep_ref_float, keep_dtype)
@@ -1193,7 +1193,7 @@ def test_compare_cpu_cuda_grads(self, contiguous):
     @pytest.mark.parametrize("dtype", (torch.float, torch.half))
     @pytest.mark.opcheck_only_one()
     def test_autocast(self, batch_sz, dtype):
-        with torch.cuda.amp.autocast():
+        with torch.amp.autocast("cuda"):
             self.test_forward(torch.device("cuda"), contiguous=False, batch_sz=batch_sz, dtype=dtype)
 
     def test_forward_scriptability(self):
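
Taken together, the hunks apply one mechanical change: the deprecated device-specific context managers torch.cuda.amp.autocast() and torch.cpu.amp.autocast() are swapped for the device-agnostic torch.amp.autocast(device_type) entry point. A minimal sketch of the replacement usage outside the test suite (the model and input below are illustrative assumptions, not part of the patch; it assumes a CUDA-capable PyTorch build):

import torch

# Illustrative model and input, not taken from the patch.
model = torch.nn.Linear(8, 4).cuda()
x = torch.randn(2, 8, device="cuda")

# Deprecated form:        with torch.cuda.amp.autocast():
# Device-agnostic form:   pass the device type as the first argument.
with torch.amp.autocast("cuda", dtype=torch.float16):
    y = model(x)  # linear layers run in float16 under autocast on CUDA

print(y.dtype)  # torch.float16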