pytorch 1.11 support: don't use conv2d_gradfix on v1.11, port grid_sample_gradfix to the new API
Thanks @timothybrooks for the fix! For #145.
parent 69c7ef0fbd
commit 407db86e6f
2 changed files with 12 additions and 1 deletion
conv2d_gradfix.py
@@ -11,6 +11,7 @@ arbitrarily high order gradients with zero performance penalty."""
 import contextlib
 import torch
+from pkg_resources import parse_version

 # pylint: disable=redefined-builtin
 # pylint: disable=arguments-differ
@@ -20,6 +21,7 @@ import torch
 enabled = False                     # Enable the custom op by setting this to true.
 weight_gradients_disabled = False   # Forcefully disable computation of gradients with respect to the weights.
+_use_pytorch_1_11_api = parse_version(torch.__version__) >= parse_version('1.11.0a') # Allow prerelease builds of 1.11

 @contextlib.contextmanager
 def no_weight_gradients(disable=True):
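Why the comparison uses '1.11.0a' rather than '1.11.0': pkg_resources orders prerelease tags below the final release, so comparing against the alpha tag also catches nightly and source builds of 1.11. A quick standalone check (the version strings below are illustrative, not from this commit):

from pkg_resources import parse_version

# Prerelease, final, and later builds of 1.11 should all take the new
# code path; anything older should not.
for v in ['1.10.2', '1.11.0a0+gitabc123', '1.11.0', '1.12.1']:
    print(v, parse_version(v) >= parse_version('1.11.0a'))
# -> False, True, True, True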
@@ -48,6 +50,9 @@ def _should_use_custom_op(input):
     assert isinstance(input, torch.Tensor)
     if (not enabled) or (not torch.backends.cudnn.enabled):
         return False
+    if _use_pytorch_1_11_api:
+        # The work-around code doesn't work on PyTorch 1.11.0 onwards
+        return False
     if input.device.type != 'cuda':
         return False
     return True
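The effect of the new gate is that on PyTorch 1.11+ every conv2d call in this module falls through to the reference implementation. A self-contained sketch of that control flow; the conv2d wrapper below is a simplification of the surrounding module (not part of this diff), with the custom-op path elided:

import torch
from pkg_resources import parse_version

enabled = True  # module-level switch, as in the diff
_use_pytorch_1_11_api = parse_version(torch.__version__) >= parse_version('1.11.0a')

def _should_use_custom_op(input):
    # Mirrors the patched gate: never use the work-around on 1.11+.
    if (not enabled) or (not torch.backends.cudnn.enabled):
        return False
    if _use_pytorch_1_11_api:
        return False
    return input.device.type == 'cuda'

def conv2d(input, weight, bias=None, stride=1, padding=0):
    if _should_use_custom_op(input):
        raise NotImplementedError('custom-op path elided in this sketch')
    # On PyTorch 1.11+ every call lands here.
    return torch.nn.functional.conv2d(input, weight, bias=bias,
                                      stride=stride, padding=padding)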
grid_sample_gradfix.py
@@ -12,6 +12,7 @@ Only works on 2D images and assumes
 `mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""

 import torch
+from pkg_resources import parse_version

 # pylint: disable=redefined-builtin
 # pylint: disable=arguments-differ
@@ -20,6 +21,7 @@ import torch
 #----------------------------------------------------------------------------

 enabled = False  # Enable the custom op by setting this to true.
+_use_pytorch_1_11_api = parse_version(torch.__version__) >= parse_version('1.11.0a') # Allow prerelease builds of 1.11

 #----------------------------------------------------------------------------
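Nothing changes for callers: with the op enabled, grid_sample still supports double backward, which is what gradfix exists for. A usage sketch (the import path is an assumption; in the StyleGAN codebase the module sits under torch_utils.ops):

import torch
from torch_utils.ops import grid_sample_gradfix  # assumed import path

grid_sample_gradfix.enabled = True
input = torch.randn(1, 3, 8, 8, requires_grad=True)
grid = torch.randn(1, 8, 8, 2, requires_grad=True)
out = grid_sample_gradfix.grid_sample(input, grid)

# First-order gradient with a graph, then a second-order pass through it.
(grad_input,) = torch.autograd.grad(out.sum(), input, create_graph=True)
grad_input.sum().backward()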
@@ -56,6 +58,10 @@ class _GridSample2dBackward(torch.autograd.Function):
     @staticmethod
     def forward(ctx, grad_output, input, grid):
         op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
-        grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
+        if _use_pytorch_1_11_api:
+            output_mask = (ctx.needs_input_grad[1], ctx.needs_input_grad[2])
+            grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False, output_mask)
+        else:
+            grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
         ctx.save_for_backward(grid)
         return grad_input, grad_grid
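The port itself: on PyTorch 1.11 the native backward op takes a trailing output_mask, a pair of booleans selecting which of the two gradients (w.r.t. input and w.r.t. grid) to compute, so the Function threads ctx.needs_input_grad through instead of always computing both. A standalone sketch of the two call shapes (tensor shapes are illustrative):

import torch
from pkg_resources import parse_version

op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
input = torch.randn(1, 3, 8, 8)
grid = torch.randn(1, 8, 8, 2)
grad_output = torch.randn(1, 3, 8, 8)  # same shape as grid_sample's output

if parse_version(torch.__version__) >= parse_version('1.11.0a'):
    # 1.11+ signature: the trailing output_mask skips unneeded gradients.
    grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False, (True, True))
else:
    # Pre-1.11 signature: both gradients are always computed.
    grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)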