entry_point (string, len 1–65) | original_triton_code (string, len 4.5k–619k) | python_code (string, len 208–60.9k) | triton_code (string, len 1.15k–275k) | repo_name (string, len 7–115) | module_name (string, len 1–65) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (sequence, len 1–6) | stars (int64, 0–19.8k) | sha (string, len 40) | repo_link (string, len 72–180) | pytorch_code (string, len 200–4.05k)
---|---|---|---|---|---|---|---|---|---|---|---|---
NasPathBranch | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qn/cqnqbqlcpg6bgcau35r5lxgdywlkgto5q42fnp26ywsu7emttdb4.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [1, 1], [2, 2], [0, 0], False, False), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
return (buf1, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
def conv1x1(in_channels, out_channels, stride=1, bias=False):
"""
Convolution 1x1 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, stride=stride, bias=bias)
def nasnet_avgpool1x1_s2():
"""
NASNet specific 1x1 Average pooling layer with stride 2.
"""
return nn.AvgPool2d(kernel_size=1, stride=2, count_include_pad=False)
class NasPathBranch(nn.Module):
"""
NASNet specific `path` branch (auxiliary block).
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
extra_padding : bool, default False
Whether to use extra padding.
"""
def __init__(self, in_channels, out_channels, extra_padding=False):
super(NasPathBranch, self).__init__()
self.extra_padding = extra_padding
self.avgpool = nasnet_avgpool1x1_s2()
self.conv = conv1x1(in_channels=in_channels, out_channels=out_channels)
if self.extra_padding:
self.pad = nn.ZeroPad2d(padding=(0, 1, 0, 1))
def forward(self, x):
if self.extra_padding:
x = self.pad(x)
x = x[:, :, 1:, 1:].contiguous()
x = self.avgpool(x)
x = self.conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
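# Editorial sketch (not part of the original module): a 1x1 average pool
# with stride 2 never averages anything, so it is plain strided subsampling.
# This is the identity the fused Triton kernel in the first column exploits
# when it loads in_ptr0[2*x0 + 8*x1] and multiplies by 1.0.
def _avgpool1x1_s2_is_subsampling():
    x = torch.rand([4, 4, 4, 4])
    assert torch.equal(nasnet_avgpool1x1_s2()(x), x[:, :, ::2, ::2])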
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
return buf1, primals_2, buf0
def conv1x1(in_channels, out_channels, stride=1, bias=False):
"""
Convolution 1x1 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, stride=stride, bias=bias)
def nasnet_avgpool1x1_s2():
"""
NASNet specific 1x1 Average pooling layer with stride 2.
"""
return nn.AvgPool2d(kernel_size=1, stride=2, count_include_pad=False)
class NasPathBranchNew(nn.Module):
"""
NASNet specific `path` branch (auxiliary block).
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
extra_padding : bool, default False
Whether to use extra padding.
"""
def __init__(self, in_channels, out_channels, extra_padding=False):
super(NasPathBranchNew, self).__init__()
self.extra_padding = extra_padding
self.avgpool = nasnet_avgpool1x1_s2()
self.conv = conv1x1(in_channels=in_channels, out_channels=out_channels)
if self.extra_padding:
self.pad = nn.ZeroPad2d(padding=(0, 1, 0, 1))
def forward(self, input_0):
primals_2 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
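# A minimal correctness sketch, assuming a CUDA device is available (call()
# launches CUDA kernels). The eager reference is inlined: AvgPool2d(1,
# stride=2) is strided subsampling, followed by the bias-free 1x1 convolution.
def _check_naspathbranch_new():
    m = NasPathBranchNew(in_channels=4, out_channels=4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    ref = nn.functional.conv2d(x[:, :, ::2, ::2], m.conv.weight)
    torch.testing.assert_close(m(x), ref)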
| earhian/imgclsmob | NasPathBranch | false | 6,628 | ["MIT"] | 1 | c87c0942420876941868c016211073dec4392e4d | https://github.com/earhian/imgclsmob/tree/c87c0942420876941868c016211073dec4392e4d | import torch
import torch.nn as nn
import torch.utils.data
def conv1x1(in_channels, out_channels, stride=1, bias=False):
"""
Convolution 1x1 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, stride=stride, bias=bias)
def nasnet_avgpool1x1_s2():
"""
NASNet specific 1x1 Average pooling layer with stride 2.
"""
return nn.AvgPool2d(kernel_size=1, stride=2, count_include_pad=False)
class Model(nn.Module):
"""
NASNet specific `path` branch (auxiliary block).
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
extra_padding : bool, default False
Whether to use extra padding.
"""
def __init__(self, in_channels, out_channels, extra_padding=False):
super().__init__()
self.extra_padding = extra_padding
self.avgpool = nasnet_avgpool1x1_s2()
self.conv = conv1x1(in_channels=in_channels, out_channels=out_channels)
if self.extra_padding:
self.pad = nn.ZeroPad2d(padding=(0, 1, 0, 1))
def forward(self, x):
if self.extra_padding:
x = self.pad(x)
x = x[:, :, 1:, 1:].contiguous()
x = self.avgpool(x)
x = self.conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
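# Editorial note on extra_padding: ZeroPad2d((0, 1, 0, 1)) pads only the
# bottom/right, and the [1:, 1:] crop drops the top/left, so together they
# shift the stride-2 sampling grid from even to odd pixel positions.
# A minimal check of that reading (hypothetical helper, CPU is enough):
def _check_extra_padding_shift():
    m = Model(in_channels=4, out_channels=4, extra_padding=True)
    x = torch.rand([4, 4, 4, 4])
    shifted = m.pad(x)[:, :, 1:, 1:]
    assert torch.equal(m.avgpool(shifted), x[:, :, 1::2, 1::2])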
|
IBNbResInitBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xl/cxlk2zmc3hpwjqxewas3mgw6gadfa7aumzsmflghwni54n2ev2cj.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# x_1 => repeat
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_3, [4]), kwargs = {})
triton_poi_fused_repeat_0 = async_compile.triton('triton_poi_fused_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4z/c4zvwvsryxghc5ysn6bpnjiv32y2hcfq23grsiocdduitkk25dil.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x_1 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused__native_batch_norm_legit_1 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2l/c2ltoxti4vablzqcsbomvbkbrq355u3db4abx22nd2iin26g2lsf.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_2 => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 4)
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4g/c4gcr34isn5uv5kxz7a4uttjmiiim6ryfo2imaotdptez6c56f53.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_3 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.full([1], -1, tl.int64)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tmp5 & tmp5
tmp7 = tl.load(in_ptr0 + ((-3) + (4*x0)), tmp6 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp8 = tmp1 >= tmp1
tmp9 = tmp1 < tmp3
tmp10 = tmp8 & tmp9
tmp11 = tmp5 & tmp10
tmp12 = tl.load(in_ptr0 + ((-2) + (4*x0)), tmp11 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp13 = triton_helpers.maximum(tmp12, tmp7)
tmp14 = tl.full([1], 1, tl.int64)
tmp15 = tmp14 >= tmp1
tmp16 = tmp14 < tmp3
tmp17 = tmp15 & tmp16
tmp18 = tmp5 & tmp17
tmp19 = tl.load(in_ptr0 + ((-1) + (4*x0)), tmp18 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp20 = triton_helpers.maximum(tmp19, tmp13)
tmp21 = tmp10 & tmp5
tmp22 = tl.load(in_ptr0 + ((-1) + (4*x0)), tmp21 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp23 = triton_helpers.maximum(tmp22, tmp20)
tmp24 = tmp10 & tmp10
tmp25 = tl.load(in_ptr0 + (4*x0), tmp24 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp26 = triton_helpers.maximum(tmp25, tmp23)
tmp27 = tmp10 & tmp17
tmp28 = tl.load(in_ptr0 + (1 + (4*x0)), tmp27 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp29 = triton_helpers.maximum(tmp28, tmp26)
tmp30 = tmp17 & tmp5
tmp31 = tl.load(in_ptr0 + (1 + (4*x0)), tmp30 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp29)
tmp33 = tmp17 & tmp10
tmp34 = tl.load(in_ptr0 + (2 + (4*x0)), tmp33 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp17 & tmp17
tmp37 = tl.load(in_ptr0 + (3 + (4*x0)), tmp36 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = tmp12 > tmp7
tmp40 = tl.full([1], 1, tl.int8)
tmp41 = tl.full([1], 0, tl.int8)
tmp42 = tl.where(tmp39, tmp40, tmp41)
tmp43 = tmp19 > tmp13
tmp44 = tl.full([1], 2, tl.int8)
tmp45 = tl.where(tmp43, tmp44, tmp42)
tmp46 = tmp22 > tmp20
tmp47 = tl.full([1], 3, tl.int8)
tmp48 = tl.where(tmp46, tmp47, tmp45)
tmp49 = tmp25 > tmp23
tmp50 = tl.full([1], 4, tl.int8)
tmp51 = tl.where(tmp49, tmp50, tmp48)
tmp52 = tmp28 > tmp26
tmp53 = tl.full([1], 5, tl.int8)
tmp54 = tl.where(tmp52, tmp53, tmp51)
tmp55 = tmp31 > tmp29
tmp56 = tl.full([1], 6, tl.int8)
tmp57 = tl.where(tmp55, tmp56, tmp54)
tmp58 = tmp34 > tmp32
tmp59 = tl.full([1], 7, tl.int8)
tmp60 = tl.where(tmp58, tmp59, tmp57)
tmp61 = tmp37 > tmp35
tmp62 = tl.full([1], 8, tl.int8)
tmp63 = tl.where(tmp61, tmp62, tmp60)
tl.store(out_ptr0 + (x0), tmp38, xmask)
tl.store(out_ptr1 + (x0), tmp63, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.repeat]
stream0 = get_raw_stream(0)
triton_poi_fused_repeat_0.run(primals_3, buf1, 16, grid=grid(16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._native_batch_norm_legit]
triton_poi_fused__native_batch_norm_legit_1.run(buf0, buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf0, buf2, buf3, buf1, primals_4, buf4, 64, grid=grid(64), stream=stream0)
del buf2
del primals_4
buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf3 # reuse
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf4, buf5, buf6, 16, grid=grid(16), stream=stream0)
return (buf5, primals_1, primals_2, buf0, buf1, buf4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 7, 7), (196, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
def ibnb_conv7x7_block(in_channels, out_channels, stride=1, padding=3, bias=False, activate=True):
"""
7x7 version of the IBN(b)-ResNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 3
Padding value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
activate : bool, default True
Whether to activate the convolution block.
"""
return IBNbConvBlock(in_channels=in_channels, out_channels=out_channels,
kernel_size=7, stride=stride, padding=padding, bias=bias, activate=activate)
class IBNbConvBlock(nn.Module):
"""
IBN(b)-ResNet specific convolution block with Instance normalization and ReLU activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
activate : bool, default True
Whether to activate the convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation=1, groups=1, bias=False, activate=True):
super(IBNbConvBlock, self).__init__()
self.activate = activate
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
self.inst_norm = nn.InstanceNorm2d(num_features=out_channels,
affine=True)
if self.activate:
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.inst_norm(x)
if self.activate:
x = self.activ(x)
return x
class IBNbResInitBlock(nn.Module):
"""
IBN(b)-ResNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self, in_channels, out_channels):
super(IBNbResInitBlock, self).__init__()
self.conv = ibnb_conv7x7_block(in_channels=in_channels,
out_channels=out_channels, stride=2)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
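# Editorial sketch of the arithmetic the fused Triton kernels in the first
# column implement for InstanceNorm2d(affine=True) followed by ReLU:
# per-(n, c) mean and biased variance over the spatial dims, rsqrt with
# eps=1e-05, affine scale/shift, then clamp at zero.
def _instance_norm_relu_reference(x, weight, bias, eps=1e-05):
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    rstd = torch.rsqrt(var + eps)  # matches libdevice.rsqrt in the kernel
    y = (x - mean) * rstd * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)
    return torch.relu(y)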
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 4
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.full([1], -1, tl.int64)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tmp5 & tmp5
tmp7 = tl.load(in_ptr0 + (-3 + 4 * x0), tmp6 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp8 = tmp1 >= tmp1
tmp9 = tmp1 < tmp3
tmp10 = tmp8 & tmp9
tmp11 = tmp5 & tmp10
tmp12 = tl.load(in_ptr0 + (-2 + 4 * x0), tmp11 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp13 = triton_helpers.maximum(tmp12, tmp7)
tmp14 = tl.full([1], 1, tl.int64)
tmp15 = tmp14 >= tmp1
tmp16 = tmp14 < tmp3
tmp17 = tmp15 & tmp16
tmp18 = tmp5 & tmp17
tmp19 = tl.load(in_ptr0 + (-1 + 4 * x0), tmp18 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp20 = triton_helpers.maximum(tmp19, tmp13)
tmp21 = tmp10 & tmp5
tmp22 = tl.load(in_ptr0 + (-1 + 4 * x0), tmp21 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp23 = triton_helpers.maximum(tmp22, tmp20)
tmp24 = tmp10 & tmp10
tmp25 = tl.load(in_ptr0 + 4 * x0, tmp24 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp26 = triton_helpers.maximum(tmp25, tmp23)
tmp27 = tmp10 & tmp17
tmp28 = tl.load(in_ptr0 + (1 + 4 * x0), tmp27 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp29 = triton_helpers.maximum(tmp28, tmp26)
tmp30 = tmp17 & tmp5
tmp31 = tl.load(in_ptr0 + (1 + 4 * x0), tmp30 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp29)
tmp33 = tmp17 & tmp10
tmp34 = tl.load(in_ptr0 + (2 + 4 * x0), tmp33 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp17 & tmp17
tmp37 = tl.load(in_ptr0 + (3 + 4 * x0), tmp36 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = tmp12 > tmp7
tmp40 = tl.full([1], 1, tl.int8)
tmp41 = tl.full([1], 0, tl.int8)
tmp42 = tl.where(tmp39, tmp40, tmp41)
tmp43 = tmp19 > tmp13
tmp44 = tl.full([1], 2, tl.int8)
tmp45 = tl.where(tmp43, tmp44, tmp42)
tmp46 = tmp22 > tmp20
tmp47 = tl.full([1], 3, tl.int8)
tmp48 = tl.where(tmp46, tmp47, tmp45)
tmp49 = tmp25 > tmp23
tmp50 = tl.full([1], 4, tl.int8)
tmp51 = tl.where(tmp49, tmp50, tmp48)
tmp52 = tmp28 > tmp26
tmp53 = tl.full([1], 5, tl.int8)
tmp54 = tl.where(tmp52, tmp53, tmp51)
tmp55 = tmp31 > tmp29
tmp56 = tl.full([1], 6, tl.int8)
tmp57 = tl.where(tmp55, tmp56, tmp54)
tmp58 = tmp34 > tmp32
tmp59 = tl.full([1], 7, tl.int8)
tmp60 = tl.where(tmp58, tmp59, tmp57)
tmp61 = tmp37 > tmp35
tmp62 = tl.full([1], 8, tl.int8)
tmp63 = tl.where(tmp61, tmp62, tmp60)
tl.store(out_ptr0 + x0, tmp38, xmask)
tl.store(out_ptr1 + x0, tmp63, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2,
2), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = empty_strided_cuda((16,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_repeat_0[grid(16)](primals_3, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
triton_poi_fused__native_batch_norm_legit_1[grid(16)](buf0, buf2,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
triton_poi_fused_relu_2[grid(64)](buf0, buf2, buf3, buf1, primals_4,
buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf2
del primals_4
buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf3
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(16)](buf4, buf5,
buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
return buf5, primals_1, primals_2, buf0, buf1, buf4, buf6
def ibnb_conv7x7_block(in_channels, out_channels, stride=1, padding=3, bias=False, activate=True):
"""
7x7 version of the IBN(b)-ResNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 3
Padding value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
activate : bool, default True
Whether to activate the convolution block.
"""
return IBNbConvBlock(in_channels=in_channels, out_channels=out_channels,
kernel_size=7, stride=stride, padding=padding, bias=bias, activate=activate)
class IBNbConvBlock(nn.Module):
"""
IBN(b)-ResNet specific convolution block with Instance normalization and ReLU activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
activate : bool, default True
Whether to activate the convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation=1, groups=1, bias=False, activate=True):
super(IBNbConvBlock, self).__init__()
self.activate = activate
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
self.inst_norm = nn.InstanceNorm2d(num_features=out_channels,
affine=True)
if self.activate:
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.inst_norm(x)
if self.activate:
x = self.activ(x)
return x
class IBNbResInitBlockNew(nn.Module):
"""
IBN(b)-ResNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self, in_channels, out_channels):
super(IBNbResInitBlockNew, self).__init__()
self.conv = ibnb_conv7x7_block(in_channels=in_channels,
out_channels=out_channels, stride=2)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, input_0):
primals_1 = self.conv.conv.weight
primals_3 = self.conv.inst_norm.weight
primals_4 = self.conv.inst_norm.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
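# Sanity sketch (pure eager PyTorch, no CUDA needed for this check): on the
# 2x2 maps this graph produces, a 3x3/stride-2/pad-1 max pool has a single
# window covering the whole map, so the pooled value is the global max per
# (n, c) plane, which is why the last Triton kernel simply loads all four
# elements at offsets 0..3 and takes their maximum.
def _pool_on_2x2_is_global_max():
    x = torch.rand([4, 4, 2, 2])
    pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    torch.testing.assert_close(pool(x), x.amax(dim=(2, 3), keepdim=True))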
| earhian/imgclsmob | IBNbResInitBlock | false | 6,629 | ["MIT"] | 1 | c87c0942420876941868c016211073dec4392e4d | https://github.com/earhian/imgclsmob/tree/c87c0942420876941868c016211073dec4392e4d | import torch
import torch.nn as nn
import torch.utils.data
def ibnb_conv7x7_block(in_channels, out_channels, stride=1, padding=3, bias=False, activate=True):
"""
7x7 version of the IBN(b)-ResNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 3
Padding value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
activate : bool, default True
Whether to activate the convolution block.
"""
return IBNbConvBlock(in_channels=in_channels, out_channels=out_channels,
kernel_size=7, stride=stride, padding=padding, bias=bias, activate=activate)
class IBNbConvBlock(nn.Module):
"""
IBN(b)-ResNet specific convolution block with Instance normalization and ReLU activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
activate : bool, default True
Whether to activate the convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation=1, groups=1, bias=False, activate=True):
super().__init__()
self.activate = activate
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
self.inst_norm = nn.InstanceNorm2d(num_features=out_channels,
affine=True)
if self.activate:
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.inst_norm(x)
if self.activate:
x = self.activ(x)
return x
class Model(nn.Module):
"""
IBN(b)-ResNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = ibnb_conv7x7_block(in_channels=in_channels,
out_channels=out_channels, stride=2)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
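# Shape walkthrough (editorial): with the 4x4x4x4 test input, the
# 7x7/stride-2/pad-3 conv maps 4 -> (4 + 2*3 - 7)//2 + 1 = 2 per spatial
# dim, and the 3x3/stride-2/pad-1 max pool maps 2 -> (2 + 2*1 - 3)//2 + 1 = 1,
# matching the (4, 4, 2, 2) and (4, 4, 1, 1) sizes asserted in call().
def _check_output_shape():
    m = Model(in_channels=4, out_channels=4)
    assert m(torch.rand([4, 4, 4, 4])).shape == (4, 4, 1, 1)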
|
BCEFocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/n4/cn4l67wrwka2oqlbqovgswkunvp62yjnmtlpzhqpoe2wqbjmrkfj.py
# Topologically Sorted Source Nodes: [logp, neg, p, sub, pow_1, loss, mean], Original ATen: [aten.binary_cross_entropy_with_logits, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mul, aten.mean]
# Source node to ATen node mapping:
# logp => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2
# loss => mul_1
# mean => mean
# neg => neg_1
# p => exp_1
# pow_1 => pow_1
# sub => sub_3
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub_2,), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %sub_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = -tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp1 - tmp14
tmp16 = tmp1 * tmp12
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 256.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [logp, neg, p, sub, pow_1, loss, mean], Original ATen: [aten.binary_cross_entropy_with_logits, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mul, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BCEFocalLoss(nn.Module):
"""Implementation of Focal Loss for Binary Classification Problems.
Focal loss was proposed in [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
"""
def __init__(self, gamma=0, eps=1e-07, reduction='mean'):
"""Constructor Method for FocalLoss class.
Args:
gamma : The focal parameter. Defaults to 0.
eps : Constant for computational stability.
reduction: The reduction parameter for Cross Entropy Loss.
"""
super(BCEFocalLoss, self).__init__()
self.gamma = gamma
self.reduction = reduction
self.eps = eps
self.bce = torch.nn.BCEWithLogitsLoss(reduction='none')
def forward(self, logits: 'torch.Tensor', targets: 'torch.Tensor'
) -> torch.Tensor:
"""Forward method.
Args:
logits: The raw logits from the network of shape (N, *), where * = extra dims
targets: The targets
Returns:
The computed loss value
"""
targets = targets.view(logits.shape)
logp = self.bce(logits, targets)
p = torch.exp(-logp)
loss = (1 - p) ** self.gamma * logp
return (loss.mean() if self.reduction == 'mean' else
loss.sum() if self.reduction == 'sum' else loss)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
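# Editorial derivation sketch of the numerically stable form the fused
# Triton kernel uses. For logits x and targets t,
#   BCEWithLogits(x, t) = (1 - t) * x - (min(0, x) - log1p(exp(-|x|)))
# because log(sigmoid(x)) = min(0, x) - log1p(exp(-|x|)), which never
# overflows for large |x|. A quick check against torch's reference:
def _stable_bce_matches():
    x = torch.randn(1000) * 20  # large logits to stress the stable form
    t = torch.rand(1000)
    log_sigmoid = torch.minimum(torch.zeros_like(x), x) - torch.log1p(torch.exp(-x.abs()))
    stable = (1 - t) * x - log_sigmoid
    ref = torch.nn.functional.binary_cross_entropy_with_logits(x, t, reduction='none')
    torch.testing.assert_close(stable, ref)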
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = -tmp12
tmp14 = tl_math.exp(tmp13)
tmp1 - tmp14  # dead expression: with gamma == 0 the focal factor (1 - p) ** 0 is the constant 1
tmp16 = tmp1 * tmp12
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = 256.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0[
grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BCEFocalLossNew(nn.Module):
"""Implementation of Focal Loss for Binary Classification Problems.
Focal loss was proposed in [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
"""
def __init__(self, gamma=0, eps=1e-07, reduction='mean'):
"""Constructor Method for FocalLoss class.
Args:
gamma : The focal parameter. Defaults to 0.
eps : Constant for computational stability.
reduction: The reduction parameter for Cross Entropy Loss.
"""
super(BCEFocalLossNew, self).__init__()
self.gamma = gamma
self.reduction = reduction
self.eps = eps
self.bce = torch.nn.BCEWithLogitsLoss(reduction='none')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
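# Editorial note: this graph was traced with the default gamma=0, so the
# focal factor (1 - p) ** 0 folds to the constant 1 and the kernel is plain
# mean-reduced BCE-with-logits over all 256 elements. Reading the kernel,
# in_ptr1 (arg1_1) is the operand fed through min/abs/log1p, i.e. the
# logits. A guarded sketch, assuming a CUDA device is available:
def _gamma0_reduces_to_mean_bce():
    t = torch.rand([4, 4, 4, 4], device='cuda')
    x = torch.rand([4, 4, 4, 4], device='cuda')
    out, = call([t, x])  # arg0_1 = targets, arg1_1 = logits per the kernel math
    ref = torch.nn.functional.binary_cross_entropy_with_logits(x, t, reduction='mean')
    torch.testing.assert_close(out, ref)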
| earlbabson/torchflare | BCEFocalLoss | false | 6,630 | ["Apache-2.0"] | 1 | 15db06d313a53a3ec4640869335ba87730562b28 | https://github.com/earlbabson/torchflare/tree/15db06d313a53a3ec4640869335ba87730562b28 | import torch
import torch.nn as nn
class Model(nn.Module):
"""Implementation of Focal Loss for Binary Classification Problems.
Focal loss was proposed in [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
"""
def __init__(self, gamma=0, eps=1e-07, reduction='mean'):
"""Constructor Method for FocalLoss class.
Args:
gamma : The focal parameter. Defaults to 0.
eps : Constant for computational stability.
reduction: The reduction parameter for Cross Entropy Loss.
"""
super().__init__()
self.gamma = gamma
self.reduction = reduction
self.eps = eps
self.bce = torch.nn.BCEWithLogitsLoss(reduction='none')
def forward(self, logits: 'torch.Tensor', targets: 'torch.Tensor'
) -> torch.Tensor:
"""Forward method.
Args:
logits: The raw logits from the network of shape (N, *), where * = extra dims
targets: The targets
Returns:
The computed loss value
"""
targets = targets.view(logits.shape)
logp = self.bce(logits, targets)
p = torch.exp(-logp)
loss = (1 - p) ** self.gamma * logp
        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
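
# Usage sketch (illustrative example, not part of the original file; gamma=2 is a
# common focal setting rather than this module's default of 0):
if __name__ == '__main__':
    loss_fn = Model(gamma=2)
    logits = torch.randn(8, 1)
    targets = torch.randint(0, 2, (8, 1)).float()
    print(loss_fn(logits, targets).item())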
|
LeNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ve/cven4fg5p554vidmwu2g3ea6jgqwadbawpokvsw357czr2wouxgw.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# x => convolution
# x_1 => sigmoid
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 98304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 6
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qz/cqz2afmrnuzfkvjeuboetwjy53aw27ea6kbdvaojvehvsnmlpibc.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_2 => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%sigmoid, [5, 5], [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 25, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 21600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = (xindex // 30) % 30
x2 = (xindex // 900)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (66 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (67 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (68 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (128 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (129 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (130 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (131 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (132 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (192 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (193 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (194 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (195 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (196 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (256 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (257 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr0 + (258 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr0 + (259 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (260 + (2*x0) + (128*x1) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp32 = tmp31 + tmp30
tmp34 = tmp33 + tmp32
tmp36 = tmp35 + tmp34
tmp38 = tmp37 + tmp36
tmp40 = tmp39 + tmp38
tmp42 = tmp41 + tmp40
tmp44 = tmp43 + tmp42
tmp46 = tmp45 + tmp44
tmp48 = tmp47 + tmp46
    tmp49 = 0.04  # 1 / 25: averages the 5x5 pooling window
tmp50 = tmp48 * tmp49
tl.store(out_ptr0 + (x3), tmp50, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/n7/cn7okv6oflbmhd4mtrlboa5nl5m3rrlwo4btzman7mpv3jrayhye.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => sigmoid_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid_1 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_sigmoid_2 = async_compile.triton('triton_poi_fused_convolution_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 10816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 169) % 16
x2 = (xindex // 2704)
x4 = xindex % 2704
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (x4 + (2720*x2)), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ow/cow3adnhdtf4hslx7he6cgthapbc3enub5ejfad2vez6kmjhk7y4.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_5 => avg_pool2d_1
# Graph fragment:
# %avg_pool2d_1 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%sigmoid_1, [5, 5], [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_3 = async_compile.triton('triton_poi_fused_avg_pool2d_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 25, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5) % 5
x2 = (xindex // 25) % 16
x3 = (xindex // 400)
x4 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (13 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (14 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (15 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (16 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (17 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (26 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (27 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (28 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (29 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (30 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (39 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (40 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (41 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (42 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (43 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (52 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (53 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr0 + (54 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr0 + (55 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (56 + (2*x0) + (26*x1) + (169*x2) + (2720*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp32 = tmp31 + tmp30
tmp34 = tmp33 + tmp32
tmp36 = tmp35 + tmp34
tmp38 = tmp37 + tmp36
tmp40 = tmp39 + tmp38
tmp42 = tmp41 + tmp40
tmp44 = tmp43 + tmp42
tmp46 = tmp45 + tmp44
tmp48 = tmp47 + tmp46
    tmp49 = 0.04  # 1 / 25: averages the 5x5 pooling window
tmp50 = tmp48 * tmp49
tl.store(out_ptr0 + (x4), tmp50, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ro/cromrs4zaduq7m7se2v4z3w5znirybvx4xdcc34q24m5zvs25ttz.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x_8 => sigmoid_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_sigmoid_4 = async_compile.triton('triton_poi_fused_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hr/chr2suwacm2elmaum6cxyefw762ibu5yr35eg7zkmfx33cr7hkuk.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x_10 => sigmoid_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_5 = async_compile.triton('triton_poi_fused_sigmoid_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120, ), (1, ))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84, ), (1, ))
assert_size_stride(primals_10, (7, 84), (84, 1))
assert_size_stride(primals_11, (7, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 64, 64), (24576, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_sigmoid_0.run(buf1, primals_2, 98304, grid=grid(98304), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 6, 30, 30), (5400, 900, 30, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_1.run(buf1, buf2, 21600, grid=grid(21600), stream=stream0)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 13, 13), (2704, 169, 13, 1))
buf4 = empty_strided_cuda((4, 16, 13, 13), (2720, 169, 13, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_2.run(buf3, primals_5, buf4, 10816, grid=grid(10816), stream=stream0)
del buf3
del primals_5
buf5 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_3.run(buf4, buf5, 1600, grid=grid(1600), stream=stream0)
buf6 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_4.run(buf7, primals_7, 480, grid=grid(480), stream=stream0)
del primals_7
buf8 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf7, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_5.run(buf9, primals_9, 336, grid=grid(336), stream=stream0)
del primals_9
buf10 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf9, reinterpret_tensor(primals_10, (84, 7), (1, 84), 0), alpha=1, beta=1, out=buf10)
del primals_11
return (buf10, buf10, primals_1, primals_3, primals_4, buf1, buf2, buf4, reinterpret_tensor(buf5, (4, 400), (400, 1), 0), buf7, buf9, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((120, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((7, 84), (84, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((7, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LeNet(nn.Module):
def __init__(self):
super().__init__()
self.conv_1 = nn.Conv2d(3, 6, kernel_size=5, padding=2)
self.sigmoid = nn.Sigmoid()
self.avgpool = nn.AvgPool2d(kernel_size=5, stride=2)
self.conv_2 = nn.Conv2d(6, 16, kernel_size=5, stride=2)
self.fc_1 = nn.Linear(16 * 5 * 5, 120)
self.fc_2 = nn.Linear(120, 84)
self.fc_3 = nn.Linear(84, 7)
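    # Note: `get_resnet_layer` below is a vestigial helper carried over from a ResNet
    # variant; it reads `self.in_channels`, which this LeNet never sets, so calling
    # it would raise an AttributeError.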
def get_resnet_layer(self, block, n_blocks, channels, stride=1):
layers = []
if self.in_channels != block.expansion * channels:
downsample = True
else:
downsample = False
layers.append(block(self.in_channels, channels, stride, downsample))
for i in range(1, n_blocks):
layers.append(block(block.expansion * channels, channels))
self.in_channels = block.expansion * channels
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_1(x)
x = self.sigmoid(x)
x = self.avgpool(x)
x = self.conv_2(x)
x = self.sigmoid(x)
x = self.avgpool(x)
x = nn.Flatten()(x)
x = self.fc_1(x)
x = self.sigmoid(x)
x = self.fc_2(x)
x = self.sigmoid(x)
x = self.fc_3(x)
h = x.view(x.shape[0], -1)
return x, h
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
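
# Shape walk-through (illustrative sketch, not part of the original module; the
# helper name `_out_size` is made up here) showing why fc_1 expects 16 * 5 * 5 inputs:
def _out_size(n, k, s, p=0):
    # standard conv / pool output-size formula: floor((n + 2p - k) / s) + 1
    return (n + 2 * p - k) // s + 1
_h = _out_size(64, 5, 1, 2)  # conv_1 (kernel 5, pad 2)      -> 64
_h = _out_size(_h, 5, 2)     # avgpool (kernel 5, stride 2)  -> 30
_h = _out_size(_h, 5, 2)     # conv_2 (kernel 5, stride 2)   -> 13
_h = _out_size(_h, 5, 2)     # avgpool                       -> 5
assert 16 * _h * _h == 400   # matches nn.Linear(16 * 5 * 5, 120)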
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # no-op: the all-true mask is never used
x3 = xindex
x1 = xindex // 4096 % 6
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 21600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x2 = xindex // 900
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (66 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (67 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (68 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (128 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (129 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (130 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (131 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (132 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (192 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (193 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (194 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (195 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (196 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (256 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (257 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp43 = tl.load(in_ptr0 + (258 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp45 = tl.load(in_ptr0 + (259 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (260 + 2 * x0 + 128 * x1 + 4096 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp32 = tmp31 + tmp30
tmp34 = tmp33 + tmp32
tmp36 = tmp35 + tmp34
tmp38 = tmp37 + tmp36
tmp40 = tmp39 + tmp38
tmp42 = tmp41 + tmp40
tmp44 = tmp43 + tmp42
tmp46 = tmp45 + tmp44
tmp48 = tmp47 + tmp46
    tmp49 = 0.04  # 1 / 25: averages the 5x5 pooling window
tmp50 = tmp48 * tmp49
tl.store(out_ptr0 + x3, tmp50, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_2(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 10816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 169 % 16
x2 = xindex // 2704
x4 = xindex % 2704
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (x4 + 2720 * x2), tmp3, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5 % 5
x2 = xindex // 25 % 16
x3 = xindex // 400
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3),
xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3),
xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3),
xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3),
xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (13 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3),
xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (14 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (15 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (16 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (17 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (26 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (27 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (28 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (29 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (30 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (39 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (40 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (41 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (42 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr0 + (43 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (52 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (53 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr0 + (54 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr0 + (55 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr0 + (56 + 2 * x0 + 26 * x1 + 169 * x2 + 2720 * x3
), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp32 = tmp31 + tmp30
tmp34 = tmp33 + tmp32
tmp36 = tmp35 + tmp34
tmp38 = tmp37 + tmp36
tmp40 = tmp39 + tmp38
tmp42 = tmp41 + tmp40
tmp44 = tmp43 + tmp42
tmp46 = tmp45 + tmp44
tmp48 = tmp47 + tmp46
    tmp49 = 0.04  # 1 / 25: averages the 5x5 pooling window
tmp50 = tmp48 * tmp49
tl.store(out_ptr0 + x4, tmp50, xmask)
@triton.jit
def triton_poi_fused_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_sigmoid_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84,), (1,))
assert_size_stride(primals_10, (7, 84), (84, 1))
assert_size_stride(primals_11, (7,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 64, 64), (24576, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_sigmoid_0[grid(98304)](buf1, primals_2,
98304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 30, 30), (5400, 900, 30, 1), torch
.float32)
triton_poi_fused_avg_pool2d_1[grid(21600)](buf1, buf2, 21600,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 13, 13), (2704, 169, 13, 1))
buf4 = empty_strided_cuda((4, 16, 13, 13), (2720, 169, 13, 1),
torch.float32)
triton_poi_fused_convolution_sigmoid_2[grid(10816)](buf3, primals_5,
buf4, 10816, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_5
buf5 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_avg_pool2d_3[grid(1600)](buf4, buf5, 1600, XBLOCK=
128, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (4, 400), (400, 1), 0),
reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_sigmoid_4[grid(480)](buf7, primals_7, 480, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
extern_kernels.mm(buf7, reinterpret_tensor(primals_8, (120, 84), (1,
120), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_sigmoid_5[grid(336)](buf9, primals_9, 336, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((4, 7), (7, 1), torch.float32)
extern_kernels.addmm(primals_11, buf9, reinterpret_tensor(
primals_10, (84, 7), (1, 84), 0), alpha=1, beta=1, out=buf10)
del primals_11
return (buf10, buf10, primals_1, primals_3, primals_4, buf1, buf2, buf4,
reinterpret_tensor(buf5, (4, 400), (400, 1), 0), buf7, buf9,
primals_10, primals_8, primals_6)
class LeNetNew(nn.Module):
def __init__(self):
super().__init__()
self.conv_1 = nn.Conv2d(3, 6, kernel_size=5, padding=2)
self.sigmoid = nn.Sigmoid()
self.avgpool = nn.AvgPool2d(kernel_size=5, stride=2)
self.conv_2 = nn.Conv2d(6, 16, kernel_size=5, stride=2)
self.fc_1 = nn.Linear(16 * 5 * 5, 120)
self.fc_2 = nn.Linear(120, 84)
self.fc_3 = nn.Linear(84, 7)
def get_resnet_layer(self, block, n_blocks, channels, stride=1):
layers = []
if self.in_channels != block.expansion * channels:
downsample = True
else:
downsample = False
layers.append(block(self.in_channels, channels, stride, downsample))
for i in range(1, n_blocks):
layers.append(block(block.expansion * channels, channels))
self.in_channels = block.expansion * channels
return nn.Sequential(*layers)
def forward(self, input_0):
primals_1 = self.conv_1.weight
primals_2 = self.conv_1.bias
primals_4 = self.conv_2.weight
primals_5 = self.conv_2.bias
primals_6 = self.fc_1.weight
primals_7 = self.fc_1.bias
primals_8 = self.fc_2.weight
primals_9 = self.fc_2.bias
primals_10 = self.fc_3.weight
primals_11 = self.fc_3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
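# Note (editor sketch, not generated code): call() returns buf10 for both outputs --
# the eager h = x.view(x.shape[0], -1) is shape-preserving here, so the compiled
# graph reuses one buffer for logits and h.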
| ducnguyenhuynh/via-trafficsign-classification | LeNet | false | 6,631 | [
"MIT"
] | 1 | e65fccc1ee377603334453eacfc3f65619dc0714 | https://github.com/ducnguyenhuynh/via-trafficsign-classification/tree/e65fccc1ee377603334453eacfc3f65619dc0714 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv_1 = nn.Conv2d(3, 6, kernel_size=5, padding=2)
self.sigmoid = nn.Sigmoid()
self.avgpool = nn.AvgPool2d(kernel_size=5, stride=2)
self.conv_2 = nn.Conv2d(6, 16, kernel_size=5, stride=2)
self.fc_1 = nn.Linear(16 * 5 * 5, 120)
self.fc_2 = nn.Linear(120, 84)
self.fc_3 = nn.Linear(84, 7)
def get_resnet_layer(self, block, n_blocks, channels, stride=1):
layers = []
if self.in_channels != block.expansion * channels:
downsample = True
else:
downsample = False
layers.append(block(self.in_channels, channels, stride, downsample))
for i in range(1, n_blocks):
layers.append(block(block.expansion * channels, channels))
self.in_channels = block.expansion * channels
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_1(x)
x = self.sigmoid(x)
x = self.avgpool(x)
x = self.conv_2(x)
x = self.sigmoid(x)
x = self.avgpool(x)
x = nn.Flatten()(x)
x = self.fc_1(x)
x = self.sigmoid(x)
x = self.fc_2(x)
x = self.sigmoid(x)
x = self.fc_3(x)
h = x.view(x.shape[0], -1)
return x, h
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
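
# Quick shape check (illustrative sketch, not from the repo; runs the eager Model on CPU):
if __name__ == '__main__':
    _model = Model()
    _logits, _h = _model(torch.rand(4, 3, 64, 64))
    assert _logits.shape == (4, 7) and _h.shape == (4, 7)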
|
FocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [logp], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# logp => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/s5/cs5wshnrdka3xma3btqijhothwpkw4ctmtyvsdzkv6seotnt4jpf.py
# Topologically Sorted Source Nodes: [logp, neg, p, sub, pow_1, loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mean]
# Source node to ATen node mapping:
# logp => exp, log, mul, neg, sub_1, sum_1, sum_2
# loss => mul_1
# mean => mean
# neg => neg_1
# p => exp_1
# pow_1 => pow_1
# sub => sub_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %neg : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%neg,), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %neg), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {})
triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1 = async_compile.triton('triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp13 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp20 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp24 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = -tmp27
tmp29 = tl_math.exp(tmp28)
tmp30 = 1.0
tmp31 = tmp30 - tmp29
tmp32 = tmp30 * tmp27
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp36 = 64.0
tmp37 = tmp35 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp37, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logp], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [logp, neg, p, sub, pow_1, loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mean]
triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1.run(buf3, buf0, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FocalLoss(nn.Module):
"""Implementation of Focal Loss.
Focal loss was proposed in [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
"""
def __init__(self, gamma=0, eps=1e-07, reduction='mean'):
"""Constructor Method for FocalLoss class.
Args:
gamma : The focal parameter. Defaults to 0.
eps : Constant for computational stability.
reduction: The reduction parameter for Cross Entropy Loss.
"""
super(FocalLoss, self).__init__()
self.gamma = gamma
self.reduction = reduction
self.eps = eps
self.ce = torch.nn.CrossEntropyLoss(reduction='none')
def forward(self, logits: 'torch.Tensor', targets: 'torch.Tensor'
) ->torch.Tensor:
"""Forward method.
Args:
logits: The raw logits from the network of shape (N,C,*) where C = number of classes , * = extra dims
targets: The targets of shape (N , *).
Returns:
The computed loss value
"""
logp = self.ce(logits, targets)
p = torch.exp(-logp)
loss = (1 - p) ** self.gamma * logp
        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
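
# Sanity sketch (illustrative, not from the repo): with the default gamma=0 the focal
# factor (1 - p) ** 0 == 1, so the loss collapses to plain mean cross entropy.
if __name__ == '__main__':
    _logits = torch.randn(8, 5)
    _targets = torch.randint(0, 5, (8,))
    _focal = FocalLoss()(_logits, _targets)
    _ce = torch.nn.functional.cross_entropy(_logits, _targets)
    assert torch.allclose(_focal, _ce)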
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
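    # The bare expressions above are dead statements left by inductor
    # codegen: with a single program instance (grid(1), xnumel == 1) the
    # x-offset and the all-true masks they build are never consumed.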
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp13 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp16 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp20 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp24 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tmp28 = -tmp27
tmp29 = tl_math.exp(tmp28)
tmp30 = 1.0
tmp30 - tmp29
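    # tmp30 - tmp29, i.e. (1 - p), is computed but discarded: with gamma=0
    # the focal factor (1 - p) ** gamma folds to the constant 1.0, so the
    # reduction below is a plain mean cross-entropy.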
tmp32 = tmp30 * tmp27
tmp33 = tl.broadcast_to(tmp32, [XBLOCK, RBLOCK])
tmp35 = tl.sum(tmp33, 1)[:, None]
tmp36 = 64.0
tmp37 = tmp35 / tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp37, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_exp_mean_mul_neg_pow_rsub_sum_1[grid(1)](
buf3, buf0, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf3,
class FocalLossNew(nn.Module):
"""Implementation of Focal Loss.
Focal loss was proposed in [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
"""
def __init__(self, gamma=0, eps=1e-07, reduction='mean'):
"""Constructor Method for FocalLoss class.
Args:
gamma : The focal parameter. Defaults to 0.
eps : Constant for computational stability.
reduction: The reduction parameter for Cross Entropy Loss.
"""
super(FocalLossNew, self).__init__()
self.gamma = gamma
self.reduction = reduction
self.eps = eps
self.ce = torch.nn.CrossEntropyLoss(reduction='none')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
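# Hypothetical CUDA sanity check (assumes a CUDA device; call() asserts the
# exact (4, 4, 4, 4) shapes). As read from the kernel launches above, the
# fused kernels apply log-softmax to the *second* argument, so the manual
# reference below mirrors that ordering.
def _focal_loss_new_check():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    out = FocalLossNew()(a, b)
    ref = (-(torch.log_softmax(b, dim=1) * a).sum(dim=1)).mean()
    return torch.allclose(out, ref, atol=1e-5)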
| earlbabson/torchflare | FocalLoss | false | 6,632 | [
"Apache-2.0"
] | 1 | 15db06d313a53a3ec4640869335ba87730562b28 | https://github.com/earlbabson/torchflare/tree/15db06d313a53a3ec4640869335ba87730562b28 | import torch
import torch.nn as nn
class Model(nn.Module):
"""Implementation of Focal Loss.
Focal loss was proposed in [Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
"""
def __init__(self, gamma=0, eps=1e-07, reduction='mean'):
"""Constructor Method for FocalLoss class.
Args:
gamma : The focal parameter. Defaults to 0.
eps : Constant for computational stability.
reduction: The reduction parameter for Cross Entropy Loss.
"""
super().__init__()
self.gamma = gamma
self.reduction = reduction
self.eps = eps
self.ce = torch.nn.CrossEntropyLoss(reduction='none')
    def forward(self, logits: 'torch.Tensor', targets: 'torch.Tensor') -> torch.Tensor:
"""Forward method.
Args:
            logits: The raw logits from the network of shape (N, C, *) where C = number of classes, * = extra dims
targets: The targets of shape (N , *).
Returns:
The computed loss value
"""
logp = self.ce(logits, targets)
p = torch.exp(-logp)
loss = (1 - p) ** self.gamma * logp
        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
EncoderUnit | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yz/cyzvc2lavs7gif3drpgmv6u3vaeybasxoxhjuhsybhfhgrpitob7.py
# Topologically Sorted Source Nodes: [res], Original ATen: [aten.view]
# Source node to ATen node mapping:
# res => view_16
# Graph fragment:
# %view_16 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_15, [16, 16]), kwargs = {})
triton_poi_fused_view_0 = async_compile.triton('triton_poi_fused_view_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fx/cfx3rb767htacjaaow6ngg7uoimmormzaz5kp22hi36idc7eiq7a.py
# Topologically Sorted Source Nodes: [add, a1_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# a1_1 => var_mean
# add => add
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_5, %view_17), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lq/clqun7a22d6o575lvu76lrmfxtyhqk32m65xzyev43u4fswkcibw.py
# Topologically Sorted Source Nodes: [add, a1_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# a1_1 => add_1, add_2, mul, mul_1, rsqrt, sub_1
# add => add
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_5, %view_17), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_2 = async_compile.triton('triton_poi_fused_add_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2p/c2pm7fcuiiuh5hyqbubeggibubfi466wlrv2fl7wi6jyflo44gfc.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_19,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jv/cjv5pbuwj6uw6ulmnpz7dt3x3thujvicbpxydittp7rhtxyf47js.py
# Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_1 => add_3
# Graph fragment:
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_21), kwargs = {})
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/p4/cp46n25cqjor44iv7w4w5d63qmmsxnkrtdqpd7lbcbaozxlm5as4.py
# Topologically Sorted Source Nodes: [a2_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# a2_1 => add_4, rsqrt_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_poi_fused_native_layer_norm_5 = async_compile.triton('triton_poi_fused_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ox/coxmjqobcehchw67m6p2zasqd2aeil3dt274wt7sztmjarah6swf.py
# Topologically Sorted Source Nodes: [a2_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# a2_1 => add_4, add_5, mul_2, mul_3, rsqrt_1, sub_2, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_12), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_13), kwargs = {})
triton_poi_fused_native_layer_norm_6 = async_compile.triton('triton_poi_fused_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 16), (16, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4, 16), (16, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [Q], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [K], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), primals_2, out=buf1)
del primals_2
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [V], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), primals_3, out=buf2)
del primals_3
# Topologically Sorted Source Nodes: [], Original ATen: []
buf3 = torch.ops.aten._scaled_dot_product_efficient_attention.default(reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 4, 16, 1), 0), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 4, 16, 1), 0), reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 4, 16, 1), 0), None, True, scale=0.5)
buf4 = buf3[0]
buf5 = buf3[1]
buf6 = buf3[2]
buf7 = buf3[3]
del buf3
buf8 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [res], Original ATen: [aten.view]
stream0 = get_raw_stream(0)
triton_poi_fused_view_0.run(buf4, buf8, 256, grid=grid(256), stream=stream0)
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [res], Original ATen: [aten.mm]
extern_kernels.mm(buf8, primals_4, out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add, a1_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_1.run(primals_5, buf9, buf10, buf11, 16, grid=grid(16), stream=stream0)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, a1_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_2.run(primals_5, buf9, buf10, buf11, primals_6, primals_7, buf12, 64, grid=grid(64), stream=stream0)
del primals_7
buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf13)
buf14 = reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0); del buf13 # reuse
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_3.run(buf14, primals_9, buf20, 64, grid=grid(64), stream=stream0)
del primals_9
buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf15)
buf16 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0); del buf15 # reuse
# Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add]
triton_poi_fused_add_4.run(buf16, buf12, primals_11, 64, grid=grid(64), stream=stream0)
del primals_11
buf17 = buf11; del buf11 # reuse
buf18 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [a2_1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_5.run(buf16, buf17, buf18, 16, grid=grid(16), stream=stream0)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [a2_1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_6.run(buf16, buf17, buf18, primals_12, primals_13, buf19, 64, grid=grid(64), stream=stream0)
del buf17
del buf18
del primals_13
return (buf19, primals_5, primals_6, primals_12, reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 4, 16, 1), 0), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 4, 16, 1), 0), reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 4, 16, 1), 0), buf4, buf5, buf6, buf7, buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(buf14, (16, 4), (4, 1), 0), buf16, primals_10, buf20, primals_8, reinterpret_tensor(buf8, (16, 16), (1, 16), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
class ScaledDotProductAttention(nn.Module):
"""
Scaled Dot-Product Attention Layer
Attributes
----------
softmax : nn.Functional
softmax function applied at the last dimension
"""
def __init__(self, dropout=0.1):
super(ScaledDotProductAttention, self).__init__()
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
def forward(self, Q, K, V, mask=None):
"""
Parameters
----------
Q : 4d tensor (batch_size, h, seq_len, d_K)
K : 4d tensor (batch_size, h, seq_len, d_K)
V : 4d tensor (batch_size, h, seq_len, d_V)
mask : 2d tensor (seq_len, seq_len)
2d binary tensor, where 1 means connection should be blocked and
0 means values can be fed forward to softmax
Returns
-------
4d tensor (batch_size, h, seq_len, d_V)
"""
scaled = torch.matmul(Q, K.transpose_(2, 3))
scaled = scaled / math.sqrt(K.shape[3])
if mask is not None:
scaled.masked_fill_(mask, float('-inf'))
scaled = self.softmax(scaled)
scaled = self.dropout(scaled)
attn = torch.matmul(scaled, V)
return attn
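# Illustrative smoke test (not from the source repo; the helper name is
# hypothetical). With dropout=0.0 the call is deterministic; K is transposed
# in place by forward().
def _sdp_attention_example():
    attn = ScaledDotProductAttention(dropout=0.0)
    Q = torch.rand(2, 2, 5, 8)
    K = torch.rand(2, 2, 5, 8)
    V = torch.rand(2, 2, 5, 8)
    out = attn(Q, K, V)
    return out.shape  # torch.Size([2, 2, 5, 8])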
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention Layer
Attributes
----------
h : int
number of parallel heads
d_K : int
dimension of features in both query and key
d_V : int
dimension of features in value
d_model : int
dimension of token embedding
    sdp_attn : ScaledDotProductAttention
        submodule that applies scaled dot-product attention
W_Q : 2d tensor (d_model, d_K * h)
learned parameters used to linearly project query to Q
W_K : 2d tensor (d_model, d_K * h)
learned parameters used to linearly project key to K
W_V : 2d tensor (d_model, d_V * h)
learned parameters used to linearly project val to V
W_O : 2d tensor (d_V * h, d_model)
learned parameters used to linearly project scaled attention
to the output tensor with the same dimension as input
"""
def __init__(self, h, d_model, d_K, d_V, dropout=0.1):
super(MultiHeadAttention, self).__init__()
self.h = h
self.d_K = d_K
self.d_V = d_V
self.d_model = d_model
self.sdp_attn = ScaledDotProductAttention(dropout=dropout)
self.W_Q = nn.Parameter(torch.Tensor(d_model, d_K * h))
self.W_K = nn.Parameter(torch.Tensor(d_model, d_K * h))
self.W_V = nn.Parameter(torch.Tensor(d_model, d_V * h))
self.W_O = nn.Parameter(torch.Tensor(d_V * h, d_model))
self.dropout = nn.Dropout(dropout)
self.reset_parameters()
def reset_parameters(self):
slope = math.sqrt(5)
nn.init.kaiming_uniform_(self.W_Q, a=slope)
nn.init.kaiming_uniform_(self.W_K, a=slope)
nn.init.kaiming_uniform_(self.W_V, a=slope)
nn.init.kaiming_uniform_(self.W_O, a=slope)
def forward(self, query, key, val, mask=None):
"""
Parameters
----------
query : 3d tensor (batch_size, seq_len, d_model)
embedded query sequence
key : 3d tensor (batch_size, seq_len, d_model)
embedded key sequence
val : 3d tensor (batch_size, seq_len, d_model)
embedded value sequence
mask : 2d tensor (seq_len, seq_len)
            2d binary tensor, where 1 means block, 0 means pass
Returns
-------
3d tensor (batch_size, seq_len, d_model)
"""
h, d_K, d_V, _d_model = self.h, self.d_K, self.d_V, self.d_model
W_Q, W_K, W_V, W_O = self.W_Q, self.W_K, self.W_V, self.W_O
bs_q, l_q = query.shape[0], query.shape[1]
bs_k, l_k = key.shape[0], key.shape[1]
bs_v, l_v = val.shape[0], val.shape[1]
Q = torch.matmul(query, W_Q)
K = torch.matmul(key, W_K)
V = torch.matmul(val, W_V)
Q = Q.view(bs_q, l_q, h, d_K)
K = K.view(bs_k, l_k, h, d_K)
V = V.view(bs_v, l_v, h, d_V)
Q.transpose_(1, 2)
K.transpose_(1, 2)
V.transpose_(1, 2)
head = self.sdp_attn(Q, K, V, mask)
head.transpose_(1, 2)
head = head.reshape(bs_q, l_q, h * d_V)
res = torch.matmul(head, W_O)
return self.dropout(res)
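# Illustrative shape check (not from the source repo; the helper name is
# hypothetical): the layer preserves the query's (batch, seq_len, d_model)
# shape.
def _multi_head_attention_example():
    mha = MultiHeadAttention(h=2, d_model=8, d_K=4, d_V=4, dropout=0.0)
    x = torch.rand(3, 5, 8)
    return mha(x, x, x).shape  # torch.Size([3, 5, 8])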
class FeedForwardNetwork(nn.Module):
"""
Position-wise Feed-Forward Network
Apply the following operation
f(x) = max(0, xW1 + b1) * W2 + b2
Attributes
----------
l1 : nn.Linear (in_features=d_model, out_features=d_ff)
l2 : nn.Linear (in_features=d_ff, out_features=d_model)
"""
def __init__(self, d_model, d_ff, dropout=0.1):
"""
Parameters
----------
d_model : int
embedding length
d_ff : int
size of intermediate activation
"""
super(FeedForwardNetwork, self).__init__()
self.l1 = nn.Linear(d_model, d_ff)
self.l2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.l2(torch.relu(self.l1(x)))
return self.dropout(x)
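# Sanity check for the docstring formula f(x) = max(0, xW1 + b1) * W2 + b2
# (not from the source repo). eval() disables dropout; nn.Linear stores its
# weight as (out_features, in_features), hence the transposes below.
def _ffn_formula_check():
    ffn = FeedForwardNetwork(d_model=4, d_ff=16).eval()
    x = torch.rand(2, 3, 4)
    manual = torch.relu(x @ ffn.l1.weight.T + ffn.l1.bias) @ ffn.l2.weight.T + ffn.l2.bias
    return torch.allclose(ffn(x), manual, atol=1e-6)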
class EncoderUnit(nn.Module):
"""
Encoder Unit - build block of encoder
Attributes
----------
attn : MultiHeadAttention
norm1 : LayerNorm
ffn : FeedForwardNetwork
norm2 : LayerNorm
"""
def __init__(self, h, d_model, d_K, d_V, d_ff, dropout=0.1):
"""
Parameters
----------
h : int
number of attention heads
d_model : int
size of embedding
d_K : int
number of features in query and key
d_V : int
number of features in value
d_ff : int
dimension of the feed-forward network
"""
super(EncoderUnit, self).__init__()
self.attn = MultiHeadAttention(h, d_model, d_K, d_V)
self.norm1 = nn.LayerNorm(d_model)
self.ffn = FeedForwardNetwork(d_model, d_ff)
self.norm2 = nn.LayerNorm(d_model)
def forward(self, seq):
"""
Parameters
----------
seq : 3d tensor (batch_size, seq_len, d_model)
Returns
-------
3d tensor (batch_size, seq_len, d_model)
"""
a1 = self.attn(seq, seq, seq)
a1 = self.norm1(seq + a1)
a2 = self.ffn(a1)
a2 = self.norm2(a1 + a2)
return a2
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'h': 4, 'd_model': 4, 'd_K': 4, 'd_V': 4, 'd_ff': 4}]
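# Illustrative smoke test (not from the source repo), reusing the
# hyperparameters returned by get_init_inputs() above.
def _encoder_unit_example():
    unit = EncoderUnit(h=4, d_model=4, d_K=4, d_V=4, d_ff=4)
    seq = torch.rand(4, 4, 4)
    return unit(seq).shape  # torch.Size([4, 4, 4])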
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
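# The two kernels above realize LayerNorm in the usual two-step inductor
# pattern: kernel 5 reduces each length-4 row to its mean and rsqrt(var +
# eps); kernel 6 applies (x - mean) * rsqrt * weight + bias elementwise.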
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 16), (16, 1))
assert_size_stride(primals_2, (4, 16), (16, 1))
assert_size_stride(primals_3, (4, 16), (16, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0),
primals_2, out=buf1)
del primals_2
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0),
primals_3, out=buf2)
del primals_3
buf3 = torch.ops.aten._scaled_dot_product_efficient_attention.default(
reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 4, 16, 1), 0),
reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 4, 16, 1), 0),
reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 4, 16, 1), 0), None,
True, scale=0.5)
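        # Inductor routed the attention core to the fused SDPA op; scale=0.5
        # matches the eager division by sqrt(4) in this configuration.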
buf4 = buf3[0]
buf5 = buf3[1]
buf6 = buf3[2]
buf7 = buf3[3]
del buf3
buf8 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_view_0[grid(256)](buf4, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(buf8, primals_4, out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_1[grid(16)](primals_5, buf9,
buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_2[grid(64)](primals_5, buf9,
buf10, buf11, primals_6, primals_7, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf13)
buf14 = reinterpret_tensor(buf13, (4, 4, 4), (16, 4, 1), 0)
del buf13
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(64)](buf14,
primals_9, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_9
buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf14, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf15)
buf16 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0)
del buf15
triton_poi_fused_add_4[grid(64)](buf16, buf12, primals_11, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_11
buf17 = buf11
del buf11
buf18 = buf10
del buf10
triton_poi_fused_native_layer_norm_5[grid(16)](buf16, buf17, buf18,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_6[grid(64)](buf16, buf17, buf18,
primals_12, primals_13, buf19, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf17
del buf18
del primals_13
return buf19, primals_5, primals_6, primals_12, reinterpret_tensor(buf0,
(4, 4, 4, 4), (64, 4, 16, 1), 0), reinterpret_tensor(buf1, (4, 4, 4,
4), (64, 4, 16, 1), 0), reinterpret_tensor(buf2, (4, 4, 4, 4), (64,
4, 16, 1), 0), buf4, buf5, buf6, buf7, buf9, reinterpret_tensor(buf12,
(16, 4), (4, 1), 0), reinterpret_tensor(buf14, (16, 4), (4, 1), 0
), buf16, primals_10, buf20, primals_8, reinterpret_tensor(buf8, (
16, 16), (1, 16), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0)
class ScaledDotProductAttention(nn.Module):
"""
Scaled Dot-Product Attention Layer
Attributes
----------
softmax : nn.Functional
softmax function applied at the last dimension
"""
def __init__(self, dropout=0.1):
super(ScaledDotProductAttention, self).__init__()
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
def forward(self, Q, K, V, mask=None):
"""
Parameters
----------
Q : 4d tensor (batch_size, h, seq_len, d_K)
K : 4d tensor (batch_size, h, seq_len, d_K)
V : 4d tensor (batch_size, h, seq_len, d_V)
mask : 2d tensor (seq_len, seq_len)
2d binary tensor, where 1 means connection should be blocked and
0 means values can be fed forward to softmax
Returns
-------
4d tensor (batch_size, h, seq_len, d_V)
"""
scaled = torch.matmul(Q, K.transpose_(2, 3))
scaled = scaled / math.sqrt(K.shape[3])
if mask is not None:
scaled.masked_fill_(mask, float('-inf'))
scaled = self.softmax(scaled)
scaled = self.dropout(scaled)
attn = torch.matmul(scaled, V)
return attn
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention Layer
Attributes
----------
h : int
number of parallel heads
d_K : int
dimension of features in both query and key
d_V : int
dimension of features in value
d_model : int
dimension of token embedding
    sdp_attn : ScaledDotProductAttention
        submodule that applies scaled dot-product attention
W_Q : 2d tensor (d_model, d_K * h)
learned parameters used to linearly project query to Q
W_K : 2d tensor (d_model, d_K * h)
learned parameters used to linearly project key to K
W_V : 2d tensor (d_model, d_V * h)
learned parameters used to linearly project val to V
W_O : 2d tensor (d_V * h, d_model)
learned parameters used to linearly project scaled attention
to the output tensor with the same dimension as input
"""
def __init__(self, h, d_model, d_K, d_V, dropout=0.1):
super(MultiHeadAttention, self).__init__()
self.h = h
self.d_K = d_K
self.d_V = d_V
self.d_model = d_model
self.sdp_attn = ScaledDotProductAttention(dropout=dropout)
self.W_Q = nn.Parameter(torch.Tensor(d_model, d_K * h))
self.W_K = nn.Parameter(torch.Tensor(d_model, d_K * h))
self.W_V = nn.Parameter(torch.Tensor(d_model, d_V * h))
self.W_O = nn.Parameter(torch.Tensor(d_V * h, d_model))
self.dropout = nn.Dropout(dropout)
self.reset_parameters()
def reset_parameters(self):
slope = math.sqrt(5)
nn.init.kaiming_uniform_(self.W_Q, a=slope)
nn.init.kaiming_uniform_(self.W_K, a=slope)
nn.init.kaiming_uniform_(self.W_V, a=slope)
nn.init.kaiming_uniform_(self.W_O, a=slope)
def forward(self, query, key, val, mask=None):
"""
Parameters
----------
query : 3d tensor (batch_size, seq_len, d_model)
embedded query sequence
key : 3d tensor (batch_size, seq_len, d_model)
embedded key sequence
val : 3d tensor (batch_size, seq_len, d_model)
embedded value sequence
mask : 2d tensor (seq_len, seq_len)
            2d binary tensor, where 1 means block, 0 means pass
Returns
-------
3d tensor (batch_size, seq_len, d_model)
"""
h, d_K, d_V, _d_model = self.h, self.d_K, self.d_V, self.d_model
W_Q, W_K, W_V, W_O = self.W_Q, self.W_K, self.W_V, self.W_O
bs_q, l_q = query.shape[0], query.shape[1]
bs_k, l_k = key.shape[0], key.shape[1]
bs_v, l_v = val.shape[0], val.shape[1]
Q = torch.matmul(query, W_Q)
K = torch.matmul(key, W_K)
V = torch.matmul(val, W_V)
Q = Q.view(bs_q, l_q, h, d_K)
K = K.view(bs_k, l_k, h, d_K)
V = V.view(bs_v, l_v, h, d_V)
Q.transpose_(1, 2)
K.transpose_(1, 2)
V.transpose_(1, 2)
head = self.sdp_attn(Q, K, V, mask)
head.transpose_(1, 2)
head = head.reshape(bs_q, l_q, h * d_V)
res = torch.matmul(head, W_O)
return self.dropout(res)
class FeedForwardNetwork(nn.Module):
"""
Position-wise Feed-Forward Network
Apply the following operation
f(x) = max(0, xW1 + b1) * W2 + b2
Attributes
----------
l1 : nn.Linear (in_features=d_model, out_features=d_ff)
l2 : nn.Linear (in_features=d_ff, out_features=d_model)
"""
def __init__(self, d_model, d_ff, dropout=0.1):
"""
Parameters
----------
d_model : int
embedding length
d_ff : int
size of intermediate activation
"""
super(FeedForwardNetwork, self).__init__()
self.l1 = nn.Linear(d_model, d_ff)
self.l2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.l2(torch.relu(self.l1(x)))
return self.dropout(x)
class EncoderUnitNew(nn.Module):
"""
Encoder Unit - build block of encoder
Attributes
----------
attn : MultiHeadAttention
norm1 : LayerNorm
ffn : FeedForwardNetwork
norm2 : LayerNorm
"""
def __init__(self, h, d_model, d_K, d_V, d_ff, dropout=0.1):
"""
Parameters
----------
h : int
number of attention heads
d_model : int
size of embedding
d_K : int
number of features in query and key
d_V : int
number of features in value
d_ff : int
dimension of the feed-forward network
"""
super(EncoderUnitNew, self).__init__()
self.attn = MultiHeadAttention(h, d_model, d_K, d_V)
self.norm1 = nn.LayerNorm(d_model)
self.ffn = FeedForwardNetwork(d_model, d_ff)
self.norm2 = nn.LayerNorm(d_model)
def forward(self, input_0):
primals_1 = self.attn.W_Q
primals_2 = self.attn.W_K
primals_3 = self.attn.W_V
primals_4 = self.attn.W_O
primals_6 = self.norm1.weight
primals_7 = self.norm1.bias
primals_8 = self.ffn.l1.weight
primals_9 = self.ffn.l1.bias
primals_10 = self.ffn.l2.weight
primals_11 = self.ffn.l2.bias
primals_12 = self.norm2.weight
primals_13 = self.norm2.bias
primals_5 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
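# Hypothetical CUDA smoke test (assumes a CUDA device; call() asserts the
# exact (4, 4, 4) input shape). Dropout was traced away, so outputs should
# line up with the eager EncoderUnit only when the latter is in eval() mode.
def _encoder_unit_new_smoke():
    unit = EncoderUnitNew(h=4, d_model=4, d_K=4, d_V=4, d_ff=4).cuda()
    seq = torch.rand(4, 4, 4, device='cuda')
    return unit(seq).shape  # torch.Size([4, 4, 4])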
| dugusword/transformer | EncoderUnit | false | 6,633 | [
"MIT"
] | 1 | 7aa10968f0e60d545bbd17f1f8c1dfb7ee88c62b | https://github.com/dugusword/transformer/tree/7aa10968f0e60d545bbd17f1f8c1dfb7ee88c62b | import math
import torch
from torch import nn
class ScaledDotProductAttention(nn.Module):
"""
Scaled Dot-Product Attention Layer
Attributes
----------
softmax : nn.Functional
softmax function applied at the last dimension
"""
def __init__(self, dropout=0.1):
super().__init__()
self.softmax = nn.Softmax(dim=-1)
self.dropout = nn.Dropout(dropout)
def forward(self, Q, K, V, mask=None):
"""
Parameters
----------
Q : 4d tensor (batch_size, h, seq_len, d_K)
K : 4d tensor (batch_size, h, seq_len, d_K)
V : 4d tensor (batch_size, h, seq_len, d_V)
mask : 2d tensor (seq_len, seq_len)
2d binary tensor, where 1 means connection should be blocked and
0 means values can be fed forward to softmax
Returns
-------
4d tensor (batch_size, h, seq_len, d_V)
"""
scaled = torch.matmul(Q, K.transpose_(2, 3))
scaled = scaled / math.sqrt(K.shape[3])
if mask is not None:
scaled.masked_fill_(mask, float('-inf'))
scaled = self.softmax(scaled)
scaled = self.dropout(scaled)
attn = torch.matmul(scaled, V)
return attn
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention Layer
Attributes
----------
h : int
number of parallel heads
d_K : int
dimension of features in both query and key
d_V : int
dimension of features in value
d_model : int
dimension of token embedding
    sdp_attn : ScaledDotProductAttention
        submodule that applies scaled dot-product attention
W_Q : 2d tensor (d_model, d_K * h)
learned parameters used to linearly project query to Q
W_K : 2d tensor (d_model, d_K * h)
learned parameters used to linearly project key to K
W_V : 2d tensor (d_model, d_V * h)
learned parameters used to linearly project val to V
W_O : 2d tensor (d_V * h, d_model)
learned parameters used to linearly project scaled attention
to the output tensor with the same dimension as input
"""
def __init__(self, h, d_model, d_K, d_V, dropout=0.1):
super().__init__()
self.h = h
self.d_K = d_K
self.d_V = d_V
self.d_model = d_model
self.sdp_attn = ScaledDotProductAttention(dropout=dropout)
self.W_Q = nn.Parameter(torch.Tensor(d_model, d_K * h))
self.W_K = nn.Parameter(torch.Tensor(d_model, d_K * h))
self.W_V = nn.Parameter(torch.Tensor(d_model, d_V * h))
self.W_O = nn.Parameter(torch.Tensor(d_V * h, d_model))
self.dropout = nn.Dropout(dropout)
self.reset_parameters()
def reset_parameters(self):
slope = math.sqrt(5)
nn.init.kaiming_uniform_(self.W_Q, a=slope)
nn.init.kaiming_uniform_(self.W_K, a=slope)
nn.init.kaiming_uniform_(self.W_V, a=slope)
nn.init.kaiming_uniform_(self.W_O, a=slope)
def forward(self, query, key, val, mask=None):
"""
Parameters
----------
query : 3d tensor (batch_size, seq_len, d_model)
embedded query sequence
key : 3d tensor (batch_size, seq_len, d_model)
embedded key sequence
val : 3d tensor (batch_size, seq_len, d_model)
embedded value sequence
mask : 2d tensor (seq_len, seq_len)
            2d binary tensor, where 1 means the connection should be blocked
            and 0 means values can pass through
Returns
-------
3d tensor (batch_size, seq_len, d_model)
"""
h, d_K, d_V, _d_model = self.h, self.d_K, self.d_V, self.d_model
W_Q, W_K, W_V, W_O = self.W_Q, self.W_K, self.W_V, self.W_O
bs_q, l_q = query.shape[0], query.shape[1]
bs_k, l_k = key.shape[0], key.shape[1]
bs_v, l_v = val.shape[0], val.shape[1]
Q = torch.matmul(query, W_Q)
K = torch.matmul(key, W_K)
V
# ... truncated (>4000 chars) for memory efficiency |
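A minimal standalone sketch (not from this repo) illustrating the mask convention used by ScaledDotProductAttention above: positions where the mask is 1/True are filled with -inf before the softmax, so they receive exactly zero attention weight.
import math
import torch

torch.manual_seed(0)
seq_len, d_K = 3, 4
Q = torch.randn(1, 1, seq_len, d_K)  # (batch, heads, seq_len, d_K)
K = torch.randn(1, 1, seq_len, d_K)

scores = torch.matmul(Q, K.transpose(2, 3)) / math.sqrt(d_K)
# causal mask: True above the diagonal = blocked
mask = torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool), diagonal=1)
scores = scores.masked_fill(mask, float('-inf'))
weights = scores.softmax(dim=-1)
print(weights)  # masked (upper-triangular) entries come out exactly 0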
SqueezeInitBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/z3/cz3vliqlpgih6ihwoaxl6cmnicfmv2ygutcuphilcsragp3evc57.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class SqueezeInitBlock(nn.Module):
"""
SqueezeNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
"""
def __init__(self, in_channels, out_channels, kernel_size):
super(SqueezeInitBlock, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=2)
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.activ(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(16)](buf1,
primals_2, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class SqueezeInitBlockNew(nn.Module):
"""
SqueezeNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
"""
def __init__(self, in_channels, out_channels, kernel_size):
super(SqueezeInitBlockNew, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=2)
self.activ = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| earhian/imgclsmob | SqueezeInitBlock | false | 6,634 | [
"MIT"
] | 1 | c87c0942420876941868c016211073dec4392e4d | https://github.com/earhian/imgclsmob/tree/c87c0942420876941868c016211073dec4392e4d | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
"""
SqueezeNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
"""
def __init__(self, in_channels, out_channels, kernel_size):
super().__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=2)
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.activ(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
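A quick eager usage sketch for the block above (re-declared inline so the snippet runs standalone; it mirrors SqueezeInitBlock from this record):
import torch
import torch.nn as nn

class _SqueezeInitBlock(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=2)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activ(self.conv(x))

x = torch.rand(4, 4, 4, 4)         # matches get_inputs() above
y = _SqueezeInitBlock(4, 4, 4)(x)  # matches get_init_inputs() above
print(y.shape)  # torch.Size([4, 4, 1, 1]): (4 - 4) // 2 + 1 = 1 per spatial dim
This is the same (4, 4, 1, 1) shape that the compiled call() asserts for the convolution output buf0.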
WRNBottleneck | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_4 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
def wrn_conv1x1(in_channels, out_channels, stride, activate):
"""
1x1 version of the WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
activate : bool
        Whether to activate the convolution block.
"""
return WRNConv(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, stride=stride, padding=0, activate=activate)
def wrn_conv3x3(in_channels, out_channels, stride, activate):
"""
3x3 version of the WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
activate : bool
        Whether to activate the convolution block.
"""
return WRNConv(in_channels=in_channels, out_channels=out_channels,
kernel_size=3, stride=stride, padding=1, activate=activate)
class WRNConv(nn.Module):
"""
WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
activate : bool
        Whether to activate the convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, activate):
super(WRNConv, self).__init__()
self.activate = activate
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, padding=
padding, bias=True)
if self.activate:
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.activate:
x = self.activ(x)
return x
class WRNBottleneck(nn.Module):
"""
WRN bottleneck block for residual path in WRN unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
width_factor : float
        Widening factor applied to the layer widths.
"""
def __init__(self, in_channels, out_channels, stride, width_factor):
super(WRNBottleneck, self).__init__()
mid_channels = int(round(out_channels // 4 * width_factor))
self.conv1 = wrn_conv1x1(in_channels=in_channels, out_channels=
mid_channels, stride=1, activate=True)
self.conv2 = wrn_conv3x3(in_channels=mid_channels, out_channels=
mid_channels, stride=stride, activate=True)
self.conv3 = wrn_conv1x1(in_channels=mid_channels, out_channels=
out_channels, stride=1, activate=False)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'stride': 1,
'width_factor': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(256)](buf3, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_1[grid(256)](buf5, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3
def wrn_conv1x1(in_channels, out_channels, stride, activate):
"""
1x1 version of the WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
activate : bool
        Whether to activate the convolution block.
"""
return WRNConv(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, stride=stride, padding=0, activate=activate)
def wrn_conv3x3(in_channels, out_channels, stride, activate):
"""
3x3 version of the WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
activate : bool
        Whether to activate the convolution block.
"""
return WRNConv(in_channels=in_channels, out_channels=out_channels,
kernel_size=3, stride=stride, padding=1, activate=activate)
class WRNConv(nn.Module):
"""
WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
activate : bool
        Whether to activate the convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, activate):
super(WRNConv, self).__init__()
self.activate = activate
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, padding=
padding, bias=True)
if self.activate:
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.activate:
x = self.activ(x)
return x
class WRNBottleneckNew(nn.Module):
"""
WRN bottleneck block for residual path in WRN unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
width_factor : float
        Widening factor applied to the layer widths.
"""
def __init__(self, in_channels, out_channels, stride, width_factor):
super(WRNBottleneckNew, self).__init__()
mid_channels = int(round(out_channels // 4 * width_factor))
self.conv1 = wrn_conv1x1(in_channels=in_channels, out_channels=
mid_channels, stride=1, activate=True)
self.conv2 = wrn_conv3x3(in_channels=mid_channels, out_channels=
mid_channels, stride=stride, activate=True)
self.conv3 = wrn_conv1x1(in_channels=mid_channels, out_channels=
out_channels, stride=1, activate=False)
def forward(self, input_0):
primals_1 = self.conv1.conv.weight
primals_2 = self.conv1.conv.bias
primals_4 = self.conv2.conv.weight
primals_5 = self.conv2.conv.bias
primals_6 = self.conv3.conv.weight
primals_7 = self.conv3.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| earhian/imgclsmob | WRNBottleneck | false | 6,635 | [
"MIT"
] | 1 | c87c0942420876941868c016211073dec4392e4d | https://github.com/earhian/imgclsmob/tree/c87c0942420876941868c016211073dec4392e4d | import torch
import torch.nn as nn
import torch.utils.data
def wrn_conv1x1(in_channels, out_channels, stride, activate):
"""
1x1 version of the WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
activate : bool
        Whether to activate the convolution block.
"""
return WRNConv(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, stride=stride, padding=0, activate=activate)
def wrn_conv3x3(in_channels, out_channels, stride, activate):
"""
3x3 version of the WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
activate : bool
Whether activate the convolution block.
"""
return WRNConv(in_channels=in_channels, out_channels=out_channels,
kernel_size=3, stride=stride, padding=1, activate=activate)
class WRNConv(nn.Module):
"""
WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
activate : bool
        Whether to activate the convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, activate):
super().__init__()
self.activate = activate
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, padding=
padding, bias=True)
if self.activate:
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.activate:
x = self.activ(x)
return x
class Model(nn.Module):
"""
WRN bottleneck block for residual path in WRN unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
width_factor : float
        Widening factor applied to the layer widths.
"""
def __init__(self, in_channels, out_channels, stride, width_factor):
super().__init__()
mid_channels = int(round(out_channels // 4 * width_factor))
self.conv1 = wrn_conv1x1(in_channels=in_channels, out_channels=
mid_channels, stride=1, activate=True)
self.conv2 = wrn_conv3x3(in_channels=mid_channels, out_channels=
mid_channels, stride=stride, activate=True)
self.conv3 = wrn_conv1x1(in_channels=mid_channels, out_channels=
out_channels, stride=1, activate=False)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'stride': 1,
'width_factor': 4}]
|
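A small standalone sketch of the bottleneck arithmetic above: width_factor scales the hidden width via mid_channels = int(round(out_channels // 4 * width_factor)), and the 1x1-3x3-1x1 stack with stride 1 preserves the spatial size.
import torch
import torch.nn as nn

def mid_channels(out_channels, width_factor):
    return int(round(out_channels // 4 * width_factor))

print(mid_channels(4, 4))    # 4   = (4 // 4) * 4, the test configuration above
print(mid_channels(256, 2))  # 128 = (256 // 4) * 2

body = nn.Sequential(
    nn.Conv2d(4, 4, 1), nn.ReLU(inplace=True),             # wrn_conv1x1, activate=True
    nn.Conv2d(4, 4, 3, padding=1), nn.ReLU(inplace=True),  # wrn_conv3x3, activate=True
    nn.Conv2d(4, 4, 1),                                    # wrn_conv1x1, activate=False
)
print(body(torch.rand(4, 4, 4, 4)).shape)  # torch.Size([4, 4, 4, 4])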
SSE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zo/czobpmlyr5atbcpsuque6vcmk7nafmb3smtbzoqilz46drm7zbkm.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6q/c6qyrmvchep2lyeodxjgze7brt2fv4khvsx2os2smplvfajckxaz.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# x_1 => sigmoid
# x_2 => mul
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_1.run(primals_1, buf1, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_1, primals_2, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SSE(nn.Module):
"""SSE : Channel Squeeze and Spatial Excitation block.
Paper : <https://arxiv.org/abs/1803.02579>
Adapted from
<https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/66178>
"""
def __init__(self, in_channels):
"""Constructor method for SSE class.
Args:
in_channels : The number of input channels in the feature map.
"""
super(SSE, self).__init__()
self.in_channels = in_channels
self.conv = nn.Conv2d(in_channels=self.in_channels, out_channels=1,
kernel_size=1, stride=1)
def forward(self, x) ->torch.Tensor:
"""Forward Method.
Args:
x: The input tensor of shape (batch, channels, height, width)
Returns:
Tensor of same shape
"""
x_inp = x
x = self.conv(x)
x = torch.sigmoid(x)
x = torch.mul(x_inp, x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](primals_1, buf1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_2, buf1
class SSENew(nn.Module):
"""SSE : Channel Squeeze and Spatial Excitation block.
Paper : <https://arxiv.org/abs/1803.02579>
Adapted from
<https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/66178>
"""
def __init__(self, in_channels):
"""Constructor method for SSE class.
Args:
in_channels : The number of input channels in the feature map.
"""
super(SSENew, self).__init__()
self.in_channels = in_channels
self.conv = nn.Conv2d(in_channels=self.in_channels, out_channels=1,
kernel_size=1, stride=1)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| earlbabson/torchflare | SSE | false | 6,636 | [
"Apache-2.0"
] | 1 | 15db06d313a53a3ec4640869335ba87730562b28 | https://github.com/earlbabson/torchflare/tree/15db06d313a53a3ec4640869335ba87730562b28 | import torch
import torch.nn as nn
class Model(nn.Module):
"""SSE : Channel Squeeze and Spatial Excitation block.
Paper : <https://arxiv.org/abs/1803.02579>
Adapted from
<https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/66178>
"""
def __init__(self, in_channels):
"""Constructor method for SSE class.
Args:
in_channels : The number of input channels in the feature map.
"""
super().__init__()
self.in_channels = in_channels
self.conv = nn.Conv2d(in_channels=self.in_channels, out_channels=1,
kernel_size=1, stride=1)
def forward(self, x) ->torch.Tensor:
"""Forward Method.
Args:
x: The input tensor of shape (batch, channels, height, width)
Returns:
Tensor of same shape
"""
x_inp = x
x = self.conv(x)
x = torch.sigmoid(x)
x = torch.mul(x_inp, x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
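A standalone eager sketch of the SSE gating above: a 1x1 convolution squeezes the channels to a single spatial map, which gates the input through a sigmoid and a broadcast multiply. This is what the two fused Triton kernels in this record compute.
import torch
import torch.nn as nn

conv = nn.Conv2d(in_channels=4, out_channels=1, kernel_size=1, stride=1)
x = torch.rand(4, 4, 4, 4)
gate = torch.sigmoid(conv(x))  # (4, 1, 4, 4) spatial attention map
y = x * gate                   # broadcasts over the channel dimension
print(y.shape)                 # torch.Size([4, 4, 4, 4]), same shape as x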
WRNInitBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xq/cxqkhhst3jbs43bo4t4kdglqlksdycss3wdyjycgxemnfciwj463.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5p/c5pbfru6tjxyj47hlsu4asncufntav2p4cxvrd4h3dnevr2saar2.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
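    # Hand-written note (not part of the Inductor codegen): this kernel
    # performs a 3x3 max pool with stride 2 and padding 1 over each 2x2
    # (H, W) plane of the ReLU output, so there is one output element per
    # (batch, channel) pair (xnumel = 4 * 4 = 16). The integer comparisons
    # below bounds-check the padded border; out_ptr0 receives the window
    # maximum and out_ptr1 an int8 code in [0, 8] naming which of the nine
    # window positions supplied it, for use by the pooling backward.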
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.full([1], -1, tl.int64)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tmp5 & tmp5
tmp7 = tl.load(in_ptr0 + ((-3) + (4*x0)), tmp6 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp8 = tmp1 >= tmp1
tmp9 = tmp1 < tmp3
tmp10 = tmp8 & tmp9
tmp11 = tmp5 & tmp10
tmp12 = tl.load(in_ptr0 + ((-2) + (4*x0)), tmp11 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp13 = triton_helpers.maximum(tmp12, tmp7)
tmp14 = tl.full([1], 1, tl.int64)
tmp15 = tmp14 >= tmp1
tmp16 = tmp14 < tmp3
tmp17 = tmp15 & tmp16
tmp18 = tmp5 & tmp17
tmp19 = tl.load(in_ptr0 + ((-1) + (4*x0)), tmp18 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp20 = triton_helpers.maximum(tmp19, tmp13)
tmp21 = tmp10 & tmp5
tmp22 = tl.load(in_ptr0 + ((-1) + (4*x0)), tmp21 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp23 = triton_helpers.maximum(tmp22, tmp20)
tmp24 = tmp10 & tmp10
tmp25 = tl.load(in_ptr0 + (4*x0), tmp24 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp26 = triton_helpers.maximum(tmp25, tmp23)
tmp27 = tmp10 & tmp17
tmp28 = tl.load(in_ptr0 + (1 + (4*x0)), tmp27 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp29 = triton_helpers.maximum(tmp28, tmp26)
tmp30 = tmp17 & tmp5
tmp31 = tl.load(in_ptr0 + (1 + (4*x0)), tmp30 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp29)
tmp33 = tmp17 & tmp10
tmp34 = tl.load(in_ptr0 + (2 + (4*x0)), tmp33 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp17 & tmp17
tmp37 = tl.load(in_ptr0 + (3 + (4*x0)), tmp36 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = tmp12 > tmp7
tmp40 = tl.full([1], 1, tl.int8)
tmp41 = tl.full([1], 0, tl.int8)
tmp42 = tl.where(tmp39, tmp40, tmp41)
tmp43 = tmp19 > tmp13
tmp44 = tl.full([1], 2, tl.int8)
tmp45 = tl.where(tmp43, tmp44, tmp42)
tmp46 = tmp22 > tmp20
tmp47 = tl.full([1], 3, tl.int8)
tmp48 = tl.where(tmp46, tmp47, tmp45)
tmp49 = tmp25 > tmp23
tmp50 = tl.full([1], 4, tl.int8)
tmp51 = tl.where(tmp49, tmp50, tmp48)
tmp52 = tmp28 > tmp26
tmp53 = tl.full([1], 5, tl.int8)
tmp54 = tl.where(tmp52, tmp53, tmp51)
tmp55 = tmp31 > tmp29
tmp56 = tl.full([1], 6, tl.int8)
tmp57 = tl.where(tmp55, tmp56, tmp54)
tmp58 = tmp34 > tmp32
tmp59 = tl.full([1], 7, tl.int8)
tmp60 = tl.where(tmp58, tmp59, tmp57)
tmp61 = tmp37 > tmp35
tmp62 = tl.full([1], 8, tl.int8)
tmp63 = tl.where(tmp61, tmp62, tmp60)
tl.store(out_ptr0 + (x0), tmp38, xmask)
tl.store(out_ptr1 + (x0), tmp63, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 16, grid=grid(16), stream=stream0)
return (buf2, primals_1, primals_3, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 7, 7), (196, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class WRNConv(nn.Module):
"""
WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
activate : bool
Whether activate the convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, activate):
super(WRNConv, self).__init__()
self.activate = activate
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, padding=
padding, bias=True)
if self.activate:
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.activate:
x = self.activ(x)
return x
class WRNInitBlock(nn.Module):
"""
WRN specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self, in_channels, out_channels):
super(WRNInitBlock, self).__init__()
self.conv = WRNConv(in_channels=in_channels, out_channels=
out_channels, kernel_size=7, stride=2, padding=3, activate=True)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
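# Illustrative usage sketch (helper not in the original source): names and
# shapes follow get_inputs()/get_init_inputs() below. A 4x4 input through the
# stride-2 7x7 conv (pad 3) gives 2x2, and the stride-2 3x3 max pool (pad 1)
# gives 1x1, matching the (4, 4, 1, 1) buffers allocated in call() above.
def _example_wrn_init_block():
    block = WRNInitBlock(in_channels=4, out_channels=4)
    out = block(torch.rand([4, 4, 4, 4]))
    assert out.shape == (4, 4, 1, 1)
    return out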
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
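    # Fused bias-add + ReLU over the conv output: in_out_ptr0 holds the
    # convolution result and is updated in place; in_ptr0 is the per-channel
    # bias (x1 = xindex // 4 % 4 picks the channel of the (4, 4, 2, 2) buffer).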
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
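    # 3x3 max pool, stride 2, padding 1, applied to each 2x2 feature map:
    # out_ptr0 receives the window maximum (out-of-bounds taps read -inf) and
    # out_ptr1 the int8 position of the argmax inside the 3x3 window.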
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.full([1], -1, tl.int64)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tmp5 & tmp5
tmp7 = tl.load(in_ptr0 + (-3 + 4 * x0), tmp6 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp8 = tmp1 >= tmp1
tmp9 = tmp1 < tmp3
tmp10 = tmp8 & tmp9
tmp11 = tmp5 & tmp10
tmp12 = tl.load(in_ptr0 + (-2 + 4 * x0), tmp11 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp13 = triton_helpers.maximum(tmp12, tmp7)
tmp14 = tl.full([1], 1, tl.int64)
tmp15 = tmp14 >= tmp1
tmp16 = tmp14 < tmp3
tmp17 = tmp15 & tmp16
tmp18 = tmp5 & tmp17
tmp19 = tl.load(in_ptr0 + (-1 + 4 * x0), tmp18 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp20 = triton_helpers.maximum(tmp19, tmp13)
tmp21 = tmp10 & tmp5
tmp22 = tl.load(in_ptr0 + (-1 + 4 * x0), tmp21 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp23 = triton_helpers.maximum(tmp22, tmp20)
tmp24 = tmp10 & tmp10
tmp25 = tl.load(in_ptr0 + 4 * x0, tmp24 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp26 = triton_helpers.maximum(tmp25, tmp23)
tmp27 = tmp10 & tmp17
tmp28 = tl.load(in_ptr0 + (1 + 4 * x0), tmp27 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp29 = triton_helpers.maximum(tmp28, tmp26)
tmp30 = tmp17 & tmp5
tmp31 = tl.load(in_ptr0 + (1 + 4 * x0), tmp30 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp29)
tmp33 = tmp17 & tmp10
tmp34 = tl.load(in_ptr0 + (2 + 4 * x0), tmp33 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp17 & tmp17
tmp37 = tl.load(in_ptr0 + (3 + 4 * x0), tmp36 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = tmp12 > tmp7
tmp40 = tl.full([1], 1, tl.int8)
tmp41 = tl.full([1], 0, tl.int8)
tmp42 = tl.where(tmp39, tmp40, tmp41)
tmp43 = tmp19 > tmp13
tmp44 = tl.full([1], 2, tl.int8)
tmp45 = tl.where(tmp43, tmp44, tmp42)
tmp46 = tmp22 > tmp20
tmp47 = tl.full([1], 3, tl.int8)
tmp48 = tl.where(tmp46, tmp47, tmp45)
tmp49 = tmp25 > tmp23
tmp50 = tl.full([1], 4, tl.int8)
tmp51 = tl.where(tmp49, tmp50, tmp48)
tmp52 = tmp28 > tmp26
tmp53 = tl.full([1], 5, tl.int8)
tmp54 = tl.where(tmp52, tmp53, tmp51)
tmp55 = tmp31 > tmp29
tmp56 = tl.full([1], 6, tl.int8)
tmp57 = tl.where(tmp55, tmp56, tmp54)
tmp58 = tmp34 > tmp32
tmp59 = tl.full([1], 7, tl.int8)
tmp60 = tl.where(tmp58, tmp59, tmp57)
tmp61 = tmp37 > tmp35
tmp62 = tl.full([1], 8, tl.int8)
tmp63 = tl.where(tmp61, tmp62, tmp60)
tl.store(out_ptr0 + x0, tmp38, xmask)
tl.store(out_ptr1 + x0, tmp63, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(64)](buf1, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(16)](buf1, buf2,
buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
return buf2, primals_1, primals_3, buf1, buf3
class WRNConv(nn.Module):
"""
WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
activate : bool
        Whether to activate the convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, activate):
super(WRNConv, self).__init__()
self.activate = activate
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, padding=
padding, bias=True)
if self.activate:
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.activate:
x = self.activ(x)
return x
class WRNInitBlockNew(nn.Module):
"""
WRN specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self, in_channels, out_channels):
super(WRNInitBlockNew, self).__init__()
self.conv = WRNConv(in_channels=in_channels, out_channels=
out_channels, kernel_size=7, stride=2, padding=3, activate=True)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, input_0):
primals_1 = self.conv.conv.weight
primals_2 = self.conv.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| earhian/imgclsmob | WRNInitBlock | false | 6,637 | [
"MIT"
] | 1 | c87c0942420876941868c016211073dec4392e4d | https://github.com/earhian/imgclsmob/tree/c87c0942420876941868c016211073dec4392e4d | import torch
import torch.nn as nn
import torch.utils.data
class WRNConv(nn.Module):
"""
WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
activate : bool
        Whether to activate the convolution block.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, activate):
super().__init__()
self.activate = activate
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels, kernel_size=kernel_size, stride=stride, padding=
padding, bias=True)
if self.activate:
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.activate:
x = self.activ(x)
return x
class Model(nn.Module):
"""
WRN specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = WRNConv(in_channels=in_channels, out_channels=
out_channels, kernel_size=7, stride=2, padding=3, activate=True)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
TripletLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zj/czjfayfcarkuoamzfcxjw3kmfgj6fl22etyizczrjgbqk5bwokbq.py
# Topologically Sorted Source Nodes: [x, y], Original ATen: [aten.div]
# Source node to ATen node mapping:
# x => div
# y => div_1
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand_1), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qz/cqzob3vprd66msbqxksowaomykxxjuerzonuix4auynyweifzuqd.py
# Topologically Sorted Source Nodes: [max_1, min_1], Original ATen: [aten.max, aten.min]
# Source node to ATen node mapping:
# max_1 => getitem
# min_1 => getitem_2
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {})
# %getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%min_1, 0), kwargs = {})
triton_poi_fused_max_min_1 = async_compile.triton('triton_poi_fused_max_min_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_min_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_min_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x0), xmask)
tmp5 = tl.load(in_ptr1 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp10 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (1))
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp19 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (2))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp28 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3))
tmp32 = tl.broadcast_to(tmp31, [XBLOCK])
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = tmp1 - tmp2
tmp7 = tmp4 == tmp6
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp3 * tmp8
tmp11 = tmp10 * tmp1
tmp12 = tmp1 - tmp11
tmp15 = tmp4 == tmp14
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp12 * tmp16
tmp18 = triton_helpers.maximum(tmp9, tmp17)
tmp20 = tmp19 * tmp1
tmp21 = tmp1 - tmp20
tmp24 = tmp4 == tmp23
tmp25 = tmp24.to(tl.float32)
tmp26 = tmp21 * tmp25
tmp27 = triton_helpers.maximum(tmp18, tmp26)
tmp29 = tmp28 * tmp1
tmp30 = tmp1 - tmp29
tmp33 = tmp4 == tmp32
tmp34 = tmp33.to(tl.float32)
tmp35 = tmp30 * tmp34
tmp36 = triton_helpers.maximum(tmp27, tmp35)
tmp37 = tmp4 != tmp6
tmp38 = tmp37.to(tl.float32)
tmp39 = tmp3 * tmp38
tmp40 = 99999999.0
tmp41 = tmp8 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp4 != tmp14
tmp44 = tmp43.to(tl.float32)
tmp45 = tmp12 * tmp44
tmp46 = tmp16 * tmp40
tmp47 = tmp45 + tmp46
tmp48 = triton_helpers.minimum(tmp42, tmp47)
tmp49 = tmp4 != tmp23
tmp50 = tmp49.to(tl.float32)
tmp51 = tmp21 * tmp50
tmp52 = tmp25 * tmp40
tmp53 = tmp51 + tmp52
tmp54 = triton_helpers.minimum(tmp48, tmp53)
tmp55 = tmp4 != tmp32
tmp56 = tmp55.to(tl.float32)
tmp57 = tmp30 * tmp56
tmp58 = tmp34 * tmp40
tmp59 = tmp57 + tmp58
tmp60 = triton_helpers.minimum(tmp54, tmp59)
tl.store(out_ptr0 + (x0), tmp36, xmask)
tl.store(out_ptr1 + (x0), tmp60, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, y], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, buf1, 16, grid=grid(16), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, mm], Original ATen: [aten.div, aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf2)
del buf0
del buf1
buf3 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf4 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [max_1, min_1], Original ATen: [aten.max, aten.min]
triton_poi_fused_max_min_1.run(buf2, arg1_1, buf3, buf4, 4, grid=grid(4), stream=stream0)
del arg1_1
del buf2
buf5 = empty_strided_cuda((0, ), (1, ), torch.float32)
return (buf5, buf4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
def cosine_dist(x, y):
"""Computes Cosine Distance."""
x = F.normalize(x, dim=1)
y = F.normalize(y, dim=1)
dist = 2 - 2 * torch.mm(x, y.t())
return dist
def euclidean_dist(x, y):
"""Computes Euclidean distance."""
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy - 2 * torch.matmul(x, y.t())
dist = dist.clamp(min=1e-12).sqrt()
return dist
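# Illustrative sketch (helper not in the original source): for rows normalized
# to unit length, cosine_dist equals the squared Euclidean distance, since
# ||a - b||^2 = 2 - 2<a, b> when ||a|| = ||b|| = 1.
def _example_distances():
    x = F.normalize(torch.rand(4, 4), dim=1)
    d_cos = cosine_dist(x, x)
    d_euc_sq = euclidean_dist(x, x) ** 2  # ~= d_cos up to the 1e-12 clamp
    return d_cos, d_euc_sq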
def hard_example_mining(distance_matrix, pos_idxs, neg_idxs):
"""For each anchor, find the hardest positive and negative sample.
Args:
        distance_matrix: pairwise distance between samples, shape [N, M]
pos_idxs: positive index with shape [N, M]
neg_idxs: negative index with shape [N, M]
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
    Note:
        Only the case in which all targets have the same number of samples is
        considered, so all anchors can be processed in parallel.
"""
assert len(distance_matrix.size()) == 2
dist_ap, _ = torch.max(distance_matrix * pos_idxs, dim=1)
dist_an, _ = torch.min(distance_matrix * neg_idxs + pos_idxs *
99999999.0, dim=1)
return dist_ap, dist_an
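# Toy example (assumed labels and distances, not from the original source):
# anchors 0 and 1 share a label, anchor 2 is alone, so its hardest positive
# collapses to itself (distance 0).
def _example_hard_mining():
    labels = torch.tensor([0, 0, 1])
    pos_idxs = (labels[:, None] == labels[None, :]).float()
    neg_idxs = 1.0 - pos_idxs
    dist = torch.tensor([[0.0, 0.2, 0.9],
                         [0.2, 0.0, 0.7],
                         [0.9, 0.7, 0.0]])
    # dist_ap = [0.2, 0.2, 0.0]; dist_an = [0.9, 0.7, 0.7]
    return hard_example_mining(dist, pos_idxs, neg_idxs)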
def softmax_weights(dist, mask):
max_v = torch.max(dist * mask, dim=1, keepdim=True)[0]
difference = dist - max_v
z = torch.sum(torch.exp(difference) * mask, dim=1, keepdim=True) + 1e-06
weights = torch.exp(difference) * mask / z
return weights
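# Sketch (illustrative): softmax_weights is a masked row-wise softmax. Masked
# entries get weight 0 and each row of unmasked weights sums to ~1; the 1e-06
# term only guards against rows whose mask is entirely zero.
def _example_softmax_weights():
    dist = torch.tensor([[1.0, 2.0, 3.0]])
    mask = torch.tensor([[1.0, 1.0, 0.0]])
    return softmax_weights(dist, mask)  # ~[[0.269, 0.731, 0.0]]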
def weighted_example_mining(distance_matrix, pos_idxs, neg_idxs):
"""For each anchor, find the weighted positive and negative sample.
Args:
        distance_matrix: pytorch Variable, pairwise distance between samples, shape [N, N]
        pos_idxs: positive index with shape [N, M]
neg_idxs: negative index with shape [N, M]
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
"""
assert len(distance_matrix.size()) == 2
dist_ap = distance_matrix * pos_idxs
dist_an = distance_matrix * neg_idxs
weights_ap = softmax_weights(dist_ap, pos_idxs)
weights_an = softmax_weights(-dist_an, neg_idxs)
dist_ap = torch.sum(dist_ap * weights_ap, dim=1)
dist_an = torch.sum(dist_an * weights_an, dim=1)
return dist_ap, dist_an
class TripletLoss(nn.Module):
"""Computes Triplet loss."""
def __init__(self, normalize_features: 'bool'=True, margin: 'float'=
None, hard_mining: 'bool'=True):
"""Constructor method for TripletLoss.
Args:
normalize_features: Whether to normalize the features. Default = True
margin: The value for margin. Default = None.
hard_mining: Whether to use hard sample mining. Default = True.
"""
super(TripletLoss, self).__init__()
self.normalize_features = normalize_features
self.margin = margin
self.hard_mining = hard_mining
def forward(self, embedding: 'torch.Tensor', targets: 'torch.Tensor'
) ->torch.Tensor:
"""Forward Method.
Args:
embedding: The output of the network.
targets: The targets.
Returns:
The computed Triplet Loss.
"""
distance_matrix = cosine_dist(embedding, embedding
) if self.normalize_features else euclidean_dist(embedding,
embedding)
n = distance_matrix.size(0)
pos_idxs = targets.view(n, 1).expand(n, n).eq(targets.view(n, 1).
expand(n, n).t()).float()
neg_idxs = targets.view(n, 1).expand(n, n).ne(targets.view(n, 1).
expand(n, n).t()).float()
if self.hard_mining:
dist_ap, dist_an = hard_example_mining(distance_matrix=
distance_matrix, pos_idxs=pos_idxs, neg_idxs=neg_idxs)
else:
dist_ap, dist_an = weighted_example_mining(distance_matrix=
distance_matrix, pos_idxs=pos_idxs, neg_idxs=neg_idxs)
y = dist_an.new().resize_as_(dist_an).fill_(1)
if self.margin is not None and self.margin > 0:
loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=self.
margin)
else:
loss = F.soft_margin_loss(dist_an - dist_ap, y)
if loss == float('Inf'):
loss = F.margin_ranking_loss(dist_an, dist_ap, y, margin=0.3)
return loss
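# Minimal usage sketch matching get_inputs() below: 4 embeddings of dim 4 and
# a [4, 1] label column; with margin=None the soft-margin branch is taken.
def _example_triplet_loss():
    criterion = TripletLoss(normalize_features=True, margin=None, hard_mining=True)
    embeddings = torch.rand(4, 4)
    targets = torch.randint(0, 2, (4, 1)).float()
    return criterion(embeddings, targets)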
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 1])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.
constexpr):
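    # Row-wise L2 normalization of the [4, 4] input (F.normalize with dim=1,
    # norm clamped at 1e-12); the result is written to two buffers because
    # cosine_dist normalizes x and y from the same tensor.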
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_max_min_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
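    # One program per anchor row of the similarity matrix: tmp3 = 2 - 2*sim is
    # the cosine distance, the label-equality mask selects the hardest positive
    # (row max), and the inequality mask plus the 99999999.0 offset selects the
    # hardest negative (row min), fusing hard_example_mining into one kernel.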
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
    tmp10 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + 1)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
    tmp19 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + 2)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
    tmp28 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + 3)
tmp32 = tl.broadcast_to(tmp31, [XBLOCK])
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = tmp1 - tmp2
tmp7 = tmp4 == tmp6
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp3 * tmp8
tmp11 = tmp10 * tmp1
tmp12 = tmp1 - tmp11
tmp15 = tmp4 == tmp14
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp12 * tmp16
tmp18 = triton_helpers.maximum(tmp9, tmp17)
tmp20 = tmp19 * tmp1
tmp21 = tmp1 - tmp20
tmp24 = tmp4 == tmp23
tmp25 = tmp24.to(tl.float32)
tmp26 = tmp21 * tmp25
tmp27 = triton_helpers.maximum(tmp18, tmp26)
tmp29 = tmp28 * tmp1
tmp30 = tmp1 - tmp29
tmp33 = tmp4 == tmp32
tmp34 = tmp33.to(tl.float32)
tmp35 = tmp30 * tmp34
tmp36 = triton_helpers.maximum(tmp27, tmp35)
tmp37 = tmp4 != tmp6
tmp38 = tmp37.to(tl.float32)
tmp39 = tmp3 * tmp38
tmp40 = 99999999.0
tmp41 = tmp8 * tmp40
tmp42 = tmp39 + tmp41
tmp43 = tmp4 != tmp14
tmp44 = tmp43.to(tl.float32)
tmp45 = tmp12 * tmp44
tmp46 = tmp16 * tmp40
tmp47 = tmp45 + tmp46
tmp48 = triton_helpers.minimum(tmp42, tmp47)
tmp49 = tmp4 != tmp23
tmp50 = tmp49.to(tl.float32)
tmp51 = tmp21 * tmp50
tmp52 = tmp25 * tmp40
tmp53 = tmp51 + tmp52
tmp54 = triton_helpers.minimum(tmp48, tmp53)
tmp55 = tmp4 != tmp32
tmp56 = tmp55.to(tl.float32)
tmp57 = tmp30 * tmp56
tmp58 = tmp34 * tmp40
tmp59 = tmp57 + tmp58
tmp60 = triton_helpers.minimum(tmp54, tmp59)
tl.store(out_ptr0 + x0, tmp36, xmask)
tl.store(out_ptr1 + x0, tmp60, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](arg0_1, buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
out=buf2)
del buf0
del buf1
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
buf4 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_max_min_1[grid(4)](buf2, arg1_1, buf3, buf4, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del arg1_1
del buf2
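        # buf5 is a zero-sized placeholder returned as the first output; this
        # compiled graph only materializes the mined distances (buf4, buf3),
        # so the scalar loss value is not computed inside call().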
buf5 = empty_strided_cuda((0,), (1,), torch.float32)
return buf5, buf4, buf3
def cosine_dist(x, y):
"""Computes Cosine Distance."""
x = F.normalize(x, dim=1)
y = F.normalize(y, dim=1)
dist = 2 - 2 * torch.mm(x, y.t())
return dist
def euclidean_dist(x, y):
"""Computes Euclidean distance."""
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy - 2 * torch.matmul(x, y.t())
dist = dist.clamp(min=1e-12).sqrt()
return dist
def hard_example_mining(distance_matrix, pos_idxs, neg_idxs):
"""For each anchor, find the hardest positive and negative sample.
Args:
        distance_matrix: pairwise distance between samples, shape [N, M]
pos_idxs: positive index with shape [N, M]
neg_idxs: negative index with shape [N, M]
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
    Note:
        Only the case in which all targets have the same number of samples is
        considered, so all anchors can be processed in parallel.
"""
assert len(distance_matrix.size()) == 2
dist_ap, _ = torch.max(distance_matrix * pos_idxs, dim=1)
dist_an, _ = torch.min(distance_matrix * neg_idxs + pos_idxs *
99999999.0, dim=1)
return dist_ap, dist_an
def softmax_weights(dist, mask):
max_v = torch.max(dist * mask, dim=1, keepdim=True)[0]
difference = dist - max_v
z = torch.sum(torch.exp(difference) * mask, dim=1, keepdim=True) + 1e-06
weights = torch.exp(difference) * mask / z
return weights
def weighted_example_mining(distance_matrix, pos_idxs, neg_idxs):
"""For each anchor, find the weighted positive and negative sample.
Args:
        distance_matrix: pytorch Variable, pairwise distance between samples, shape [N, N]
        pos_idxs: positive index with shape [N, M]
neg_idxs: negative index with shape [N, M]
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
"""
assert len(distance_matrix.size()) == 2
dist_ap = distance_matrix * pos_idxs
dist_an = distance_matrix * neg_idxs
weights_ap = softmax_weights(dist_ap, pos_idxs)
weights_an = softmax_weights(-dist_an, neg_idxs)
dist_ap = torch.sum(dist_ap * weights_ap, dim=1)
dist_an = torch.sum(dist_an * weights_an, dim=1)
return dist_ap, dist_an
class TripletLossNew(nn.Module):
"""Computes Triplet loss."""
def __init__(self, normalize_features: 'bool'=True, margin: 'float'=
None, hard_mining: 'bool'=True):
"""Constructor method for TripletLoss.
Args:
normalize_features: Whether to normalize the features. Default = True
margin: The value for margin. Default = None.
hard_mining: Whether to use hard sample mining. Default = True.
"""
super(TripletLossNew, self).__init__()
self.normalize_features = normalize_features
self.margin = margin
self.hard_mining = hard_mining
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| earlbabson/torchflare | TripletLoss | false | 6,638 | [
"Apache-2.0"
] | 1 | 15db06d313a53a3ec4640869335ba87730562b28 | https://github.com/earlbabson/torchflare/tree/15db06d313a53a3ec4640869335ba87730562b28 | import torch
import torch.nn.functional as F
import torch.nn as nn
def cosine_dist(x, y):
"""Computes Cosine Distance."""
x = F.normalize(x, dim=1)
y = F.normalize(y, dim=1)
dist = 2 - 2 * torch.mm(x, y.t())
return dist
def euclidean_dist(x, y):
"""Computes Euclidean distance."""
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy - 2 * torch.matmul(x, y.t())
dist = dist.clamp(min=1e-12).sqrt()
return dist
def hard_example_mining(distance_matrix, pos_idxs, neg_idxs):
"""For each anchor, find the hardest positive and negative sample.
Args:
distance_matrix: pair wise distance between samples, shape [N, M]
pos_idxs: positive index with shape [N, M]
neg_idxs: negative index with shape [N, M]
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
    Note:
        Only the case in which all targets have the same number of samples is
        considered, so all anchors can be processed in parallel.
"""
assert len(distance_matrix.size()) == 2
dist_ap, _ = torch.max(distance_matrix * pos_idxs, dim=1)
dist_an, _ = torch.min(distance_matrix * neg_idxs + pos_idxs *
99999999.0, dim=1)
return dist_ap, dist_an
def softmax_weights(dist, mask):
max_v = torch.max(dist * mask, dim=1, keepdim=True)[0]
difference = dist - max_v
z = torch.sum(torch.exp(difference) * mask, dim=1, keepdim=True) + 1e-06
weights = torch.exp(difference) * mask / z
return weights
def weighted_example_mining(distance_matrix, pos_idxs, neg_idxs):
"""For each anchor, find the weighted positive and negative sample.
Args:
        distance_matrix: pytorch Variable, pairwise distance between samples, shape [N, N]
        pos_idxs: positive index with shape [N, M]
neg_idxs: negative index with shape [N, M]
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
"""
assert len(distance_matrix.size()) == 2
dist_ap = distance_matrix * pos_idxs
dist_an = distance_matrix * neg_idxs
weights_ap = softmax_weights(dist_ap, pos_idxs)
weights_an = softmax_weights(-dist_an, neg_idxs)
dist_ap = torch.sum(dist_ap * weights_ap, dim=1)
dist_an = torch.sum(dist_an * weights_an, dim=1)
return dist_ap, dist_an
class Model(nn.Module):
"""Computes Triplet loss."""
def __init__(self, normalize_features: 'bool'=True, margin: 'float'=
None, hard_mining: 'bool'=True):
"""Constructor method for TripletLoss.
Args:
normalize_features: Whether to normalize the features. Default = True
margin: The value for margin. Default = None.
hard_mining: Whether to use hard sample mining. Default = True.
"""
super().__init__()
self.normalize_features = normalize_features
self.margin = margin
self.hard_mining = hard_mining
def forward(self, embedding: 'torch.Tensor', targets: 'torch.Tensor'
) ->torch.Tensor:
"""Forward Method.
Args:
embedding: The output of the network.
targets: The targets.
Returns:
The computed Triplet Loss.
"""
distance_matrix = cosine_dist(embedding, embedding
) if self.normalize_features else euclidean_dist(embedding,
embedding)
n = distance_matrix.size(0)
pos_idxs = targets.view(n, 1).expand(n, n).eq(targets.view(
# ... truncated (>4000 chars) for memory efficiency |
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/36/c36op2nzojy3jww5tx362tasmjgqempu7qy24lnkuda4gfmzksed.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => clone
# Graph fragment:
# %clone : [num_users=4] = call_function[target=torch.ops.aten.clone.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/44/c44js2ihmdrcz7tge4apeqkgsrg2krjyvucxxcvnq3h3o4c57ho5.py
# Topologically Sorted Source Nodes: [mul, true_positives, sub, mul_1, false_positives, sub_1, mul_2, false_negatives], Original ATen: [aten.mul, aten.sum, aten.rsub]
# Source node to ATen node mapping:
# false_negatives => sum_3
# false_positives => sum_2
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# sub => sub
# sub_1 => sub_1
# true_positives => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone, %clone_1), kwargs = {})
# %sum_1 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [0, 2, 3]), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %clone_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone, %sub), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [0, 2, 3]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %clone), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone_1, %sub_1), kwargs = {})
# %sum_3 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [0, 2, 3]), kwargs = {})
triton_per_fused_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_mul_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_rsub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = (rindex // 16)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0) + (64*r2)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0) + (64*r2)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp7 - tmp1
tmp9 = tmp0 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp7 - tmp0
tmp15 = tmp1 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
tl.store(out_ptr2 + (x0), tmp19, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sc/cscpcrvrzpgwooh5yryqd667grljkeunjup3nzkhmsu32a2u6zv6.py
# Topologically Sorted Source Nodes: [mul_3, add, union, eq, float_1, mul_4, add_2, mul_5, add_3, add_4, add_5, score, mean, sub_2], Original ATen: [aten.mul, aten.add, aten.eq, aten._to_copy, aten.div, aten.mean, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_2 => add_2
# add_3 => add_3
# add_4 => add_4
# add_5 => add_5
# eq => eq
# float_1 => convert_element_type
# mean => mean
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# score => div
# sub_2 => sub_2
# union => add_1
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %sum_3), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_1, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 1e-20), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %sum_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %sum_3), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, 1e-20), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, %add_5), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mean), kwargs = {})
triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2 = async_compile.triton('triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp5 = tl.load(in_ptr2 + (r0), None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = tmp0 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = tmp6 == tmp7
tmp9 = tmp8.to(tl.float32)
tmp10 = 1e-20
tmp11 = tmp9 * tmp10
tmp12 = tmp2 + tmp11
tmp13 = tmp2 + tmp3
tmp14 = tmp13 + tmp5
tmp15 = tmp14 + tmp10
tmp16 = tmp12 / tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.sum(tmp17, 1)[:, None]
tmp20 = 4.0
tmp21 = tmp19 / tmp20
tmp22 = 1.0
tmp23 = tmp22 - tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf3 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf4 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul, true_positives, sub, mul_1, false_positives, sub_1, mul_2, false_negatives], Original ATen: [aten.mul, aten.sum, aten.rsub]
triton_per_fused_mul_rsub_sum_1.run(buf0, buf1, buf2, buf3, buf4, 4, 64, grid=grid(4), stream=stream0)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [mul_3, add, union, eq, float_1, mul_4, add_2, mul_5, add_3, add_4, add_5, score, mean, sub_2], Original ATen: [aten.mul, aten.add, aten.eq, aten._to_copy, aten.div, aten.mean, aten.rsub]
triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2.run(buf6, buf2, buf3, buf4, 1, 4, grid=grid(1), stream=stream0)
del buf2
del buf3
del buf4
return (buf6, buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def calculate_segmentation_statistics(outputs: 'torch.Tensor', targets:
'torch.Tensor', class_dim: 'int'=1, threshold=None):
"""Compute calculate segmentation statistics.
Args:
outputs: torch.Tensor.
targets: torch.Tensor.
threshold: threshold for binarization of predictions.
class_dim: indicates class dimension (K).
Returns:
True positives , false positives , false negatives for segmentation task.
"""
num_dims = len(outputs.shape)
    assert num_dims > 2, 'Found fewer than three dimensions, shape should be [bs, C, ...]'
assert outputs.shape == targets.shape, 'shape mismatch'
if threshold is not None:
outputs = (outputs > threshold).float()
dims = [dim for dim in range(num_dims) if dim != class_dim]
true_positives = torch.sum(outputs * targets, dim=dims)
false_positives = torch.sum(outputs * (1 - targets), dim=dims)
false_negatives = torch.sum(targets * (1 - outputs), dim=dims)
return true_positives, false_positives, false_negatives
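# Sketch (illustrative helper, not in the original source): for a perfect
# binary prediction the statistics reduce to tp = per-class positive count and
# fp = fn = 0, each of shape [K] when class_dim=1.
def _example_seg_stats():
    t = (torch.rand(2, 3, 4, 4) > 0.5).float()
    tp, fp, fn = calculate_segmentation_statistics(t, t, class_dim=1)
    return tp, fp, fn  # fp and fn are zero tensors of shape [3]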
class MetricMeter:
"""Base Class to structuring your metrics."""
def accumulate(self, outputs, targets):
"""Method to accumulate outputs and targets per the batch."""
raise NotImplementedError
def compute(self):
"""Method to compute the metric on epoch end."""
raise NotImplementedError
def reset(self):
"""Method to reset the accumulation lists."""
raise NotImplementedError
class DiceScore(MetricMeter):
"""Class to compute Dice Score."""
def __init__(self, threshold: 'float'=None, class_dim: 'int'=1):
"""Constructor method for DiceScore.
Args:
threshold: threshold for binarization of predictions
class_dim: indicates class dimension (K)
Note:
Supports only binary cases
"""
self.threshold = threshold
self.class_dim = class_dim
self.eps = 1e-20
self._outputs = []
self._targets = []
self.reset()
def handle(self) ->str:
"""Method to get the class name.
Returns:
The class name
"""
return self.__class__.__name__.lower()
def accumulate(self, outputs: 'torch.Tensor', targets: 'torch.Tensor'):
"""Class to accumulate the outputs and targets.
Args:
outputs: [N, K, ...] tensor that for each of the N samples
indicates the probability of the sample belonging to each of
the K num_classes.
targets: binary [N, K, ...] tensor that encodes which of the K
num_classes are associated with the N-th sample.
"""
self._outputs.append(outputs)
self._targets.append(targets)
def compute(self) ->torch.Tensor:
"""Computes the dice score.
Returns:
The computed Dice score.
"""
self._outputs = torch.cat(self._outputs)
self._targets = torch.cat(self._targets)
tp, fp, fn = calculate_segmentation_statistics(outputs=self.
_outputs, targets=self._targets, threshold=self.threshold,
class_dim=self.class_dim)
union = tp + fp + fn
score = (2 * tp + self.eps * (union == 0).float()) / (2 * tp + fp +
fn + self.eps)
return torch.mean(score)
def reset(self):
"""Resets the accumulation lists."""
self._outputs = []
self._targets = []
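# Usage sketch (illustrative): accumulate one or more batches, then call
# compute() once; identical binary tensors give a Dice score of ~1.
def _example_dice_score():
    meter = DiceScore(threshold=None, class_dim=1)
    t = (torch.rand(4, 4, 4, 4) > 0.5).float()
    meter.accumulate(t, t)
    return meter.compute()  # ~= 1.0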
class DiceLoss(nn.Module):
"""Implementation of Dice Loss."""
def __init__(self, class_dim=1):
"""Constructor method for Dice Loss.
Args:
            class_dim: The dimension indicating the class axis.
"""
super(DiceLoss, self).__init__()
self.dice = DiceScore(threshold=None, class_dim=class_dim)
def forward(self, outputs: 'torch.Tensor', targets: 'torch.Tensor'
) ->torch.Tensor:
"""Forward method.
Args:
outputs: outputs from the net after applying activations.
targets: The targets.
Returns:
The computed loss value.
"""
self.dice.reset()
self.dice.accumulate(outputs=outputs, targets=targets)
return 1 - self.dice.compute()
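# Usage sketch (illustrative): a perfect prediction drives the loss to ~0,
# mirroring the (4, 4, 4, 4) shapes used by get_inputs() below.
def _example_dice_loss():
    criterion = DiceLoss(class_dim=1)
    t = (torch.rand(4, 4, 4, 4) > 0.5).float()
    return criterion(t, t)  # ~= 0.0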
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
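    # Plain element-wise copy: materializes the torch.cat/clone of the single
    # accumulated outputs (resp. targets) tensor into a fresh buffer.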
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_per_fused_mul_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
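    # Per-class reduction: x0 indexes the 4 classes, r spans batch x spatial
    # (4 * 16 = 64 elements), producing tp = sum(o*t), fp = sum(o*(1-t)) and
    # fn = sum(t*(1-o)) in a single pass.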
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = rindex // 16
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp7 - tmp1
tmp9 = tmp0 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp7 - tmp0
tmp15 = tmp1 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
tl.store(out_ptr2 + x0, tmp19, xmask)
@triton.jit
def triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
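    # Final scalar: dice = (2*tp + eps*(union == 0)) / (2*tp + fp + fn + eps)
    # with eps = 1e-20, averaged over the 4 classes and flipped into the loss
    # value 1 - mean(dice), stored in the 0-dim output buffer.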
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp5 = tl.load(in_ptr2 + r0, None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = tmp0 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = tmp6 == tmp7
tmp9 = tmp8.to(tl.float32)
tmp10 = 1e-20
tmp11 = tmp9 * tmp10
tmp12 = tmp2 + tmp11
tmp13 = tmp2 + tmp3
tmp14 = tmp13 + tmp5
tmp15 = tmp14 + tmp10
tmp16 = tmp12 / tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.sum(tmp17, 1)[:, None]
tmp20 = 4.0
tmp21 = tmp19 / tmp20
tmp22 = 1.0
tmp23 = tmp22 - tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_cat_0[grid(256)](arg1_1, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
buf4 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_mul_rsub_sum_1[grid(4)](buf0, buf1, buf2, buf3,
buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5
del buf5
triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2[grid(1)](buf6,
buf2, buf3, buf4, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf2
del buf3
del buf4
return buf6, buf1, buf0
def calculate_segmentation_statistics(outputs: 'torch.Tensor', targets:
'torch.Tensor', class_dim: 'int'=1, threshold=None):
"""Compute calculate segmentation statistics.
Args:
outputs: torch.Tensor.
targets: torch.Tensor.
threshold: threshold for binarization of predictions.
class_dim: indicates class dimension (K).
Returns:
True positives , false positives , false negatives for segmentation task.
"""
num_dims = len(outputs.shape)
assert num_dims > 2, 'Found only two dimensions, shape should be [bs , C , ...]'
assert outputs.shape == targets.shape, 'shape mismatch'
if threshold is not None:
outputs = (outputs > threshold).float()
dims = [dim for dim in range(num_dims) if dim != class_dim]
true_positives = torch.sum(outputs * targets, dim=dims)
false_positives = torch.sum(outputs * (1 - targets), dim=dims)
false_negatives = torch.sum(targets * (1 - outputs), dim=dims)
return true_positives, false_positives, false_negatives
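# Illustrative sketch (helper not in the original torchflare source): on a
# tiny 1x1x2x2 prediction/target pair the statistics reduce to simple counts.
def _example_segmentation_statistics():
    preds = torch.tensor([[[[1.0, 0.0], [1.0, 0.0]]]])
    masks = torch.tensor([[[[1.0, 1.0], [0.0, 0.0]]]])
    tp, fp, fn = calculate_segmentation_statistics(preds, masks)
    # One overlapping pixel, one spurious prediction, one missed pixel.
    assert tp.item() == 1.0 and fp.item() == 1.0 and fn.item() == 1.0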
class MetricMeter:
"""Base Class to structuring your metrics."""
def accumulate(self, outputs, targets):
"""Method to accumulate outputs and targets per the batch."""
raise NotImplementedError
def compute(self):
"""Method to compute the metric on epoch end."""
raise NotImplementedError
def reset(self):
"""Method to reset the accumulation lists."""
raise NotImplementedError
class DiceScore(MetricMeter):
"""Class to compute Dice Score."""
def __init__(self, threshold: 'float'=None, class_dim: 'int'=1):
"""Constructor method for DiceScore.
Args:
threshold: threshold for binarization of predictions
class_dim: indicates class dimension (K)
Note:
Supports only binary cases
"""
self.threshold = threshold
self.class_dim = class_dim
self.eps = 1e-20
self._outputs = []
self._targets = []
self.reset()
def handle(self) ->str:
"""Method to get the class name.
Returns:
The class name
"""
return self.__class__.__name__.lower()
def accumulate(self, outputs: 'torch.Tensor', targets: 'torch.Tensor'):
"""Class to accumulate the outputs and targets.
Args:
outputs: [N, K, ...] tensor that for each of the N samples
indicates the probability of the sample belonging to each of
the K num_classes.
targets: binary [N, K, ...] tensor that encodes which of the K
num_classes are associated with the N-th sample.
"""
self._outputs.append(outputs)
self._targets.append(targets)
def compute(self) ->torch.Tensor:
"""Computes the dice score.
Returns:
The computed Dice score.
"""
self._outputs = torch.cat(self._outputs)
self._targets = torch.cat(self._targets)
tp, fp, fn = calculate_segmentation_statistics(outputs=self.
_outputs, targets=self._targets, threshold=self.threshold,
class_dim=self.class_dim)
union = tp + fp + fn
score = (2 * tp + self.eps * (union == 0).float()) / (2 * tp + fp +
fn + self.eps)
return torch.mean(score)
def reset(self):
"""Resets the accumulation lists."""
self._outputs = []
self._targets = []
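# Hedged usage sketch (helper added for illustration; not in the original
# repo): with tp = fp = fn = 1 the Dice score is
# 2*tp / (2*tp + fp + fn) = 2/4 = 0.5, so the corresponding loss is 0.5.
def _example_dice_score():
    meter = DiceScore(threshold=0.5)
    meter.accumulate(torch.tensor([[[[1.0, 0.0], [1.0, 0.0]]]]),
        torch.tensor([[[[1.0, 1.0], [0.0, 0.0]]]]))
    return meter.compute()  # tensor(0.5)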
class DiceLossNew(nn.Module):
"""Implementation of Dice Loss."""
def __init__(self, class_dim=1):
"""Constructor method for Dice Loss.
Args:
            class_dim: the dimension indicating the class.
"""
super(DiceLossNew, self).__init__()
self.dice = DiceScore(threshold=None, class_dim=class_dim)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
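# Note: the compiled call() above is specialized by its assert_size_stride
# guards to contiguous CUDA tensors of shape (4, 4, 4, 4); other shapes or
# devices trip the asserts rather than triggering a recompile.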
| earlbabson/torchflare | DiceLoss | false | 6,639 | [
"Apache-2.0"
] | 1 | 15db06d313a53a3ec4640869335ba87730562b28 | https://github.com/earlbabson/torchflare/tree/15db06d313a53a3ec4640869335ba87730562b28 | import torch
import torch.nn as nn
def calculate_segmentation_statistics(outputs: 'torch.Tensor', targets:
'torch.Tensor', class_dim: 'int'=1, threshold=None):
"""Compute calculate segmentation statistics.
Args:
outputs: torch.Tensor.
targets: torch.Tensor.
threshold: threshold for binarization of predictions.
class_dim: indicates class dimension (K).
Returns:
True positives , false positives , false negatives for segmentation task.
"""
num_dims = len(outputs.shape)
assert num_dims > 2, 'Found only two dimensions, shape should be [bs , C , ...]'
assert outputs.shape == targets.shape, 'shape mismatch'
if threshold is not None:
outputs = (outputs > threshold).float()
dims = [dim for dim in range(num_dims) if dim != class_dim]
true_positives = torch.sum(outputs * targets, dim=dims)
false_positives = torch.sum(outputs * (1 - targets), dim=dims)
false_negatives = torch.sum(targets * (1 - outputs), dim=dims)
return true_positives, false_positives, false_negatives
class MetricMeter:
"""Base Class to structuring your metrics."""
def accumulate(self, outputs, targets):
"""Method to accumulate outputs and targets per the batch."""
raise NotImplementedError
def compute(self):
"""Method to compute the metric on epoch end."""
raise NotImplementedError
def reset(self):
"""Method to reset the accumulation lists."""
raise NotImplementedError
class DiceScore(MetricMeter):
"""Class to compute Dice Score."""
def __init__(self, threshold: 'float'=None, class_dim: 'int'=1):
"""Constructor method for DiceScore.
Args:
threshold: threshold for binarization of predictions
class_dim: indicates class dimension (K)
Note:
Supports only binary cases
"""
self.threshold = threshold
self.class_dim = class_dim
self.eps = 1e-20
self._outputs = []
self._targets = []
self.reset()
def handle(self) ->str:
"""Method to get the class name.
Returns:
The class name
"""
return self.__class__.__name__.lower()
def accumulate(self, outputs: 'torch.Tensor', targets: 'torch.Tensor'):
"""Class to accumulate the outputs and targets.
Args:
outputs: [N, K, ...] tensor that for each of the N samples
indicates the probability of the sample belonging to each of
the K num_classes.
targets: binary [N, K, ...] tensor that encodes which of the K
num_classes are associated with the N-th sample.
"""
self._outputs.append(outputs)
self._targets.append(targets)
def compute(self) ->torch.Tensor:
"""Computes the dice score.
Returns:
The computed Dice score.
"""
self._outputs = torch.cat(self._outputs)
self._targets = torch.cat(self._targets)
tp, fp, fn = calculate_segmentation_statistics(outputs=self.
_outputs, targets=self._targets, threshold=self.threshold,
class_dim=self.class_dim)
union = tp + fp + fn
score = (2 * tp + self.eps * (union == 0).float()) / (2 * tp + fp +
fn + self.eps)
return torch.mean(score)
def reset(self):
"""Resets the accumulation lists."""
self._outputs = []
self._targets = []
class Model(nn.Module):
"""Implementation of Dice Loss."""
def __init__(self, class_dim=1):
"""Constructor method for Dice Loss.
Args:
            class_dim: the dimension indicating the class.
"""
super().__init__()
self.dice = DiceScore(threshold=None, class_dim=class_dim)
def forward(self, outputs: 'torch.Tensor', targets: 'torch.Tensor'
) ->torch.Tensor:
"""Forward method.
Args:
outpu
# ... truncated (>4000 chars) for memory efficiency |
Classifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sa/csap5cthikzuwdmqu2ehow73ilvezq4ym46wfjbhn3cebigfe7xy.py
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.softplus]
# Source node to ATen node mapping:
# hidden => div, exp, gt, log1p, mul, where
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p, 1.0), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 20.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %div), kwargs = {})
triton_poi_fused_softplus_0 = async_compile.triton('triton_poi_fused_softplus_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
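# Numerical-stability note: the kernel mirrors nn.Softplus's default
# threshold (20.0) -- above it, softplus(x) == x to float precision and
# exp(x) would overflow, so the input is passed through unchanged.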
# kernel path: runs/run_shard_4/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# scores => amax, exp_1, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# scores => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.softplus]
stream0 = get_raw_stream(0)
triton_poi_fused_softplus_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Classifier(nn.Module):
def __init__(self, z_dim, hidden_dim, class_dim):
super().__init__()
self.fc1 = nn.Linear(z_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, class_dim)
self.softplus = nn.Softplus()
self.softmax = nn.Softmax(dim=1)
def forward(self, z):
hidden = self.softplus(self.fc1(z))
scores = self.softmax(self.fc2(hidden))
return scores
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'z_dim': 4, 'hidden_dim': 4, 'class_dim': 4}]
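# Hedged usage sketch (illustrative helper, not in the original repo): the
# softmax over dim=1 makes each row of the output a probability vector.
def _example_classifier():
    clf = Classifier(z_dim=4, hidden_dim=4, class_dim=4)
    probs = clf(torch.rand(8, 4))
    assert torch.allclose(probs.sum(dim=1), torch.ones(8))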
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_softplus_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4
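# call() returns the softmax output first; the trailing tensors are
# intermediates (reshaped input, pre-activation, softplus output, weights)
# that TorchInductor presumably keeps alive for the backward pass.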
class ClassifierNew(nn.Module):
def __init__(self, z_dim, hidden_dim, class_dim):
super().__init__()
self.fc1 = nn.Linear(z_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, class_dim)
self.softplus = nn.Softplus()
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| einbandi/samplednn | Classifier | false | 6,640 | [
"MIT"
] | 1 | 3525e46ab5096a569dde40e5a10d6ee05128ec7d | https://github.com/einbandi/samplednn/tree/3525e46ab5096a569dde40e5a10d6ee05128ec7d | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, z_dim, hidden_dim, class_dim):
super().__init__()
self.fc1 = nn.Linear(z_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, class_dim)
self.softplus = nn.Softplus()
self.softmax = nn.Softmax(dim=1)
def forward(self, z):
hidden = self.softplus(self.fc1(z))
scores = self.softmax(self.fc2(hidden))
return scores
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
IOULoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/36/c36op2nzojy3jww5tx362tasmjgqempu7qy24lnkuda4gfmzksed.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => clone
# Graph fragment:
# %clone : [num_users=4] = call_function[target=torch.ops.aten.clone.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/44/c44js2ihmdrcz7tge4apeqkgsrg2krjyvucxxcvnq3h3o4c57ho5.py
# Topologically Sorted Source Nodes: [mul, true_positives, sub, mul_1, false_positives, sub_1, mul_2, false_negatives], Original ATen: [aten.mul, aten.sum, aten.rsub]
# Source node to ATen node mapping:
# false_negatives => sum_3
# false_positives => sum_2
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# sub => sub
# sub_1 => sub_1
# true_positives => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone, %clone_1), kwargs = {})
# %sum_1 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [0, 2, 3]), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %clone_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone, %sub), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [0, 2, 3]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %clone), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone_1, %sub_1), kwargs = {})
# %sum_3 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [0, 2, 3]), kwargs = {})
triton_per_fused_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_mul_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_rsub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = (rindex // 16)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0) + (64*r2)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0) + (64*r2)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp7 - tmp1
tmp9 = tmp0 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp7 - tmp0
tmp15 = tmp1 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
tl.store(out_ptr2 + (x0), tmp19, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2q/c2qiwsdsiabhrmwd55slc4645jvsazmtjuiyfnyvgt4tmi2osxvz.py
# Topologically Sorted Source Nodes: [add, union, eq, float_1, mul_3, add_2, add_3, add_4, add_5, score, mean, sub_2], Original ATen: [aten.add, aten.eq, aten._to_copy, aten.mul, aten.div, aten.mean, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_2 => add_2
# add_3 => add_3
# add_4 => add_4
# add_5 => add_5
# eq => eq
# float_1 => convert_element_type
# mean => mean
# mul_3 => mul_3
# score => div
# sub_2 => sub_2
# union => add_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %sum_3), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_1, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 1e-20), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %mul_3), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %sum_3), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, 1e-20), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, %add_5), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mean), kwargs = {})
triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2 = async_compile.triton('triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp3 = tl.load(in_ptr2 + (r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 == tmp5
tmp7 = tmp6.to(tl.float32)
tmp8 = 1e-20
tmp9 = tmp7 * tmp8
tmp10 = tmp0 + tmp9
tmp11 = tmp4 + tmp8
tmp12 = tmp10 / tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tmp16 = 4.0
tmp17 = tmp15 / tmp16
tmp18 = 1.0
tmp19 = tmp18 - tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf3 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf4 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul, true_positives, sub, mul_1, false_positives, sub_1, mul_2, false_negatives], Original ATen: [aten.mul, aten.sum, aten.rsub]
triton_per_fused_mul_rsub_sum_1.run(buf0, buf1, buf2, buf3, buf4, 4, 64, grid=grid(4), stream=stream0)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [add, union, eq, float_1, mul_3, add_2, add_3, add_4, add_5, score, mean, sub_2], Original ATen: [aten.add, aten.eq, aten._to_copy, aten.mul, aten.div, aten.mean, aten.rsub]
triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2.run(buf6, buf2, buf3, buf4, 1, 4, grid=grid(1), stream=stream0)
del buf2
del buf3
del buf4
return (buf6, buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def calculate_segmentation_statistics(outputs: 'torch.Tensor', targets:
'torch.Tensor', class_dim: 'int'=1, threshold=None):
"""Compute calculate segmentation statistics.
Args:
outputs: torch.Tensor.
targets: torch.Tensor.
threshold: threshold for binarization of predictions.
class_dim: indicates class dimension (K).
Returns:
True positives , false positives , false negatives for segmentation task.
"""
num_dims = len(outputs.shape)
assert num_dims > 2, 'Found only two dimensions, shape should be [bs , C , ...]'
assert outputs.shape == targets.shape, 'shape mismatch'
if threshold is not None:
outputs = (outputs > threshold).float()
dims = [dim for dim in range(num_dims) if dim != class_dim]
true_positives = torch.sum(outputs * targets, dim=dims)
false_positives = torch.sum(outputs * (1 - targets), dim=dims)
false_negatives = torch.sum(targets * (1 - outputs), dim=dims)
return true_positives, false_positives, false_negatives
class MetricMeter:
"""Base Class to structuring your metrics."""
def accumulate(self, outputs, targets):
"""Method to accumulate outputs and targets per the batch."""
raise NotImplementedError
def compute(self):
"""Method to compute the metric on epoch end."""
raise NotImplementedError
def reset(self):
"""Method to reset the accumulation lists."""
raise NotImplementedError
class IOU(MetricMeter):
"""Class which computes intersection over union."""
def __init__(self, threshold: 'float'=None, class_dim: 'int'=1):
"""Constructor method for IOU.
Args:
threshold: threshold for binarization of predictions
class_dim: indicates class dimension (K)
Note:
Supports only binary cases
"""
self.threshold = threshold
self.class_dim = class_dim
self.eps = 1e-20
self._outputs = []
self._targets = []
self.reset()
def handle(self) ->str:
"""Method to get the class name.
Returns:
The class name
"""
return self.__class__.__name__.lower()
def accumulate(self, outputs: 'torch.Tensor', targets: 'torch.Tensor'):
"""Method to accumulate the outputs and targets.
Args:
outputs: [N, K, ...] tensor that for each of the N samples
indicates the probability of the sample belonging to each of
the K num_classes.
targets: binary [N, K, ...] tensor that encodes which of the K
num_classes are associated with the N-th sample.
"""
self._outputs.append(outputs)
self._targets.append(targets)
def compute(self) ->torch.Tensor:
"""Method to Compute IOU.
Returns:
The computed iou.
"""
self._outputs = torch.cat(self._outputs)
self._targets = torch.cat(self._targets)
tp, fp, fn = calculate_segmentation_statistics(outputs=self.
_outputs, targets=self._targets, threshold=self.threshold,
class_dim=self.class_dim)
union = tp + fp + fn
score = (tp + self.eps * (union == 0).float()) / (tp + fp + fn +
self.eps)
return torch.mean(score)
def reset(self):
"""Method to reset the accumulation lists."""
self._outputs = []
self._targets = []
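# Hedged sketch (illustrative helper, not in the original repo): with
# tp = fp = fn = 1 the IOU is tp / (tp + fp + fn) = 1/3, so IOULoss below
# would return roughly 2/3.
def _example_iou():
    meter = IOU(threshold=0.5)
    meter.accumulate(torch.tensor([[[[1.0, 0.0], [1.0, 0.0]]]]),
        torch.tensor([[[[1.0, 1.0], [0.0, 0.0]]]]))
    return meter.compute()  # ~tensor(0.3333)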
class IOULoss(nn.Module):
"""Computes intersection over union Loss.
IOULoss = 1 - iou_score
"""
def __init__(self, class_dim=1):
"""Constructor method for IOULoss.
Args:
class_dim: indicates class dimension (K) for
outputs and targets tensors (default = 1)
"""
super(IOULoss, self).__init__()
self.iou = IOU(threshold=None, class_dim=class_dim)
def forward(self, outputs: 'torch.Tensor', targets: 'torch.Tensor'
) ->torch.Tensor:
"""Forward Method.
Args:
outputs: outputs from the net after applying activations.
targets: The targets.
Returns:
The computed loss value.
"""
self.iou.reset()
self.iou.accumulate(outputs=outputs, targets=targets)
return 1 - self.iou.compute()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_per_fused_mul_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = rindex // 16
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = 1.0
tmp8 = tmp7 - tmp1
tmp9 = tmp0 * tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp7 - tmp0
tmp15 = tmp1 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
tl.store(out_ptr2 + x0, tmp19, xmask)
@triton.jit
def triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp3 = tl.load(in_ptr2 + r0, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 == tmp5
tmp7 = tmp6.to(tl.float32)
tmp8 = 1e-20
tmp9 = tmp7 * tmp8
tmp10 = tmp0 + tmp9
tmp11 = tmp4 + tmp8
tmp12 = tmp10 / tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tmp16 = 4.0
tmp17 = tmp15 / tmp16
tmp18 = 1.0
tmp19 = tmp18 - tmp17
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None)
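# Note: the eps * (union == 0) term in the numerator makes an empty
# prediction/target pair score eps / eps = 1.0 instead of 0, matching the
# convention that a correctly-empty mask is a perfect match.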
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_cat_0[grid(256)](arg1_1, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
buf3 = empty_strided_cuda((4,), (1,), torch.float32)
buf4 = empty_strided_cuda((4,), (1,), torch.float32)
triton_per_fused_mul_rsub_sum_1[grid(4)](buf0, buf1, buf2, buf3,
buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5
del buf5
triton_per_fused__to_copy_add_div_eq_mean_mul_rsub_2[grid(1)](buf6,
buf2, buf3, buf4, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf2
del buf3
del buf4
return buf6, buf1, buf0
def calculate_segmentation_statistics(outputs: 'torch.Tensor', targets:
'torch.Tensor', class_dim: 'int'=1, threshold=None):
"""Compute calculate segmentation statistics.
Args:
outputs: torch.Tensor.
targets: torch.Tensor.
threshold: threshold for binarization of predictions.
class_dim: indicates class dimension (K).
Returns:
True positives , false positives , false negatives for segmentation task.
"""
num_dims = len(outputs.shape)
assert num_dims > 2, 'Found only two dimensions, shape should be [bs , C , ...]'
assert outputs.shape == targets.shape, 'shape mismatch'
if threshold is not None:
outputs = (outputs > threshold).float()
dims = [dim for dim in range(num_dims) if dim != class_dim]
true_positives = torch.sum(outputs * targets, dim=dims)
false_positives = torch.sum(outputs * (1 - targets), dim=dims)
false_negatives = torch.sum(targets * (1 - outputs), dim=dims)
return true_positives, false_positives, false_negatives
class MetricMeter:
"""Base Class to structuring your metrics."""
def accumulate(self, outputs, targets):
"""Method to accumulate outputs and targets per the batch."""
raise NotImplementedError
def compute(self):
"""Method to compute the metric on epoch end."""
raise NotImplementedError
def reset(self):
"""Method to reset the accumulation lists."""
raise NotImplementedError
class IOU(MetricMeter):
"""Class which computes intersection over union."""
def __init__(self, threshold: 'float'=None, class_dim: 'int'=1):
"""Constructor method for IOU.
Args:
threshold: threshold for binarization of predictions
class_dim: indicates class dimension (K)
Note:
Supports only binary cases
"""
self.threshold = threshold
self.class_dim = class_dim
self.eps = 1e-20
self._outputs = []
self._targets = []
self.reset()
def handle(self) ->str:
"""Method to get the class name.
Returns:
The class name
"""
return self.__class__.__name__.lower()
def accumulate(self, outputs: 'torch.Tensor', targets: 'torch.Tensor'):
"""Method to accumulate the outputs and targets.
Args:
outputs: [N, K, ...] tensor that for each of the N samples
indicates the probability of the sample belonging to each of
the K num_classes.
targets: binary [N, K, ...] tensor that encodes which of the K
num_classes are associated with the N-th sample.
"""
self._outputs.append(outputs)
self._targets.append(targets)
def compute(self) ->torch.Tensor:
"""Method to Compute IOU.
Returns:
The computed iou.
"""
self._outputs = torch.cat(self._outputs)
self._targets = torch.cat(self._targets)
tp, fp, fn = calculate_segmentation_statistics(outputs=self.
_outputs, targets=self._targets, threshold=self.threshold,
class_dim=self.class_dim)
union = tp + fp + fn
score = (tp + self.eps * (union == 0).float()) / (tp + fp + fn +
self.eps)
return torch.mean(score)
def reset(self):
"""Method to reset the accumulation lists."""
self._outputs = []
self._targets = []
class IOULossNew(nn.Module):
"""Computes intersection over union Loss.
IOULoss = 1 - iou_score
"""
def __init__(self, class_dim=1):
"""Constructor method for IOULoss.
Args:
class_dim: indicates class dimension (K) for
outputs and targets tensors (default = 1)
"""
super(IOULossNew, self).__init__()
self.iou = IOU(threshold=None, class_dim=class_dim)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| earlbabson/torchflare | IOULoss | false | 6,641 | [
"Apache-2.0"
] | 1 | 15db06d313a53a3ec4640869335ba87730562b28 | https://github.com/earlbabson/torchflare/tree/15db06d313a53a3ec4640869335ba87730562b28 | import torch
import torch.nn as nn
def calculate_segmentation_statistics(outputs: 'torch.Tensor', targets:
'torch.Tensor', class_dim: 'int'=1, threshold=None):
"""Compute calculate segmentation statistics.
Args:
outputs: torch.Tensor.
targets: torch.Tensor.
threshold: threshold for binarization of predictions.
class_dim: indicates class dimension (K).
Returns:
True positives , false positives , false negatives for segmentation task.
"""
num_dims = len(outputs.shape)
assert num_dims > 2, 'Found only two dimensions, shape should be [bs , C , ...]'
assert outputs.shape == targets.shape, 'shape mismatch'
if threshold is not None:
outputs = (outputs > threshold).float()
dims = [dim for dim in range(num_dims) if dim != class_dim]
true_positives = torch.sum(outputs * targets, dim=dims)
false_positives = torch.sum(outputs * (1 - targets), dim=dims)
false_negatives = torch.sum(targets * (1 - outputs), dim=dims)
return true_positives, false_positives, false_negatives
class MetricMeter:
"""Base Class to structuring your metrics."""
def accumulate(self, outputs, targets):
"""Method to accumulate outputs and targets per the batch."""
raise NotImplementedError
def compute(self):
"""Method to compute the metric on epoch end."""
raise NotImplementedError
def reset(self):
"""Method to reset the accumulation lists."""
raise NotImplementedError
class IOU(MetricMeter):
"""Class which computes intersection over union."""
def __init__(self, threshold: 'float'=None, class_dim: 'int'=1):
"""Constructor method for IOU.
Args:
threshold: threshold for binarization of predictions
class_dim: indicates class dimension (K)
Note:
Supports only binary cases
"""
self.threshold = threshold
self.class_dim = class_dim
self.eps = 1e-20
self._outputs = []
self._targets = []
self.reset()
def handle(self) ->str:
"""Method to get the class name.
Returns:
The class name
"""
return self.__class__.__name__.lower()
def accumulate(self, outputs: 'torch.Tensor', targets: 'torch.Tensor'):
"""Method to accumulate the outputs and targets.
Args:
outputs: [N, K, ...] tensor that for each of the N samples
indicates the probability of the sample belonging to each of
the K num_classes.
targets: binary [N, K, ...] tensor that encodes which of the K
num_classes are associated with the N-th sample.
"""
self._outputs.append(outputs)
self._targets.append(targets)
def compute(self) ->torch.Tensor:
"""Method to Compute IOU.
Returns:
The computed iou.
"""
self._outputs = torch.cat(self._outputs)
self._targets = torch.cat(self._targets)
tp, fp, fn = calculate_segmentation_statistics(outputs=self.
_outputs, targets=self._targets, threshold=self.threshold,
class_dim=self.class_dim)
union = tp + fp + fn
score = (tp + self.eps * (union == 0).float()) / (tp + fp + fn +
self.eps)
return torch.mean(score)
def reset(self):
"""Method to reset the accumulation lists."""
self._outputs = []
self._targets = []
class Model(nn.Module):
"""Computes intersection over union Loss.
IOULoss = 1 - iou_score
"""
def __init__(self, class_dim=1):
"""Constructor method for IOULoss.
Args:
class_dim: indicates class dimension (K) for
outputs and targets tensors (default = 1)
"""
super().__init__()
self.iou = IOU(threshold=None, class_dim=class_dim)
def forward(self, outputs: 'torch.Tensor', targets: 'torch.
# ... truncated (>4000 chars) for memory efficiency |
MeshEdgeEmbeddingLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yd/cydbtjoq352gcolmflbvu2nqkda7xg7q5hnvltb47jsg5dbmubym.py
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# linear => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
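# Added commentary (not emitted by Inductor): this "clone" kernel materializes
# the permute(0, 2, 1) view contiguously. It reads in_ptr0[y1, x2, y0] from the
# (4, 4, 4) input with strides (16, 4, 1) and writes out_ptr0[y1, y0, x2], so
# that the extern mm() below sees a plain row-major (16, 4) operand.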
# kernel path: runs/run_shard_4/inductor_cache/7b/c7bf34fgn2dhohe7ejneqlees25vyq6sbe4c5lfvoehzliak2nz6.py
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.add]
# Source node to ATen node mapping:
# linear => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch
from torch import nn
class MeshEdgeEmbeddingLayer(nn.Module):
"""
    Very important - who said that a-c is meaningful at first layer...
"""
def __init__(self, input_size, embedding_size, bias=True):
super(MeshEdgeEmbeddingLayer, self).__init__()
self.lin = nn.Linear(input_size, embedding_size, bias=bias)
def forward(self, x):
x = x.squeeze(-1)
x = x.permute(0, 2, 1)
return self.lin(x).permute(0, 2, 1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'embedding_size': 4}]
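# Hypothetical sanity check (added for illustration, not part of the original
# file): the squeeze -> permute -> linear -> permute pipeline above is
# equivalent to applying the linear layer along dim 1 of a
# (batch, features, edges) tensor.
def _check_edge_embedding():
    layer = MeshEdgeEmbeddingLayer(input_size=4, embedding_size=4)
    x = torch.rand(4, 4, 4)
    out = layer(x)
    # Contract the weight over the feature dimension directly, broadcasting
    # the bias over the batch and edge dimensions.
    ref = torch.einsum('bfe,gf->bge', x, layer.lin.weight)
    ref = ref + layer.lin.bias[None, :, None]
    assert out.shape == (4, 4, 4)
    assert torch.allclose(out, ref, atol=1e-6)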
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(64)](buf2, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
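# Note (added commentary): the (16, 1, 4) strides on the returned view of buf2
# realize the eager module's final .permute(0, 2, 1) for free; a permutation
# only reinterprets strides, so no copy kernel is needed on the output side.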
class MeshEdgeEmbeddingLayerNew(nn.Module):
"""
    Very important - who said that a-c is meaningful at first layer...
"""
def __init__(self, input_size, embedding_size, bias=True):
super(MeshEdgeEmbeddingLayerNew, self).__init__()
self.lin = nn.Linear(input_size, embedding_size, bias=bias)
def forward(self, input_0):
primals_2 = self.lin.weight
primals_3 = self.lin.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| eldadp100/The-Mesh-Transformer | MeshEdgeEmbeddingLayer | false | 6,642 | [
"MIT"
] | 1 | b3ab18f774251feff1093040dfdcf7b836a43505 | https://github.com/eldadp100/The-Mesh-Transformer/tree/b3ab18f774251feff1093040dfdcf7b836a43505 | import torch
import torch.utils.data
import torch
from torch import nn
class Model(nn.Module):
"""
    Very important - who said that a-c is meaningful at first layer...
"""
def __init__(self, input_size, embedding_size, bias=True):
super().__init__()
self.lin = nn.Linear(input_size, embedding_size, bias=bias)
def forward(self, x):
x = x.squeeze(-1)
x = x.permute(0, 2, 1)
return self.lin(x).permute(0, 2, 1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
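# Hypothetical driver (added for illustration) showing the calling convention
# these helpers appear to follow: get_init_inputs() feeds the constructor
# positionally and get_inputs() feeds forward().
#
#     model = Model(*get_init_inputs())
#     out = model(*get_inputs())
#     assert out.shape == (4, 4, 4)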
|
Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sa/csap5cthikzuwdmqu2ehow73ilvezq4ym46wfjbhn3cebigfe7xy.py
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.softplus]
# Source node to ATen node mapping:
# hidden => div, exp, gt, log1p, mul, where
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p, 1.0), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 20.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %div), kwargs = {})
triton_poi_fused_softplus_0 = async_compile.triton('triton_poi_fused_softplus_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
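# Added commentary: the gt/where pair above is Softplus's overflow guard.
# For x * beta > 20, log1p(exp(x)) already equals x to float32 precision
# (the correction term is exp(-x) < 2e-9), so the kernel returns x directly
# and never uses the exp() result for large inputs.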
# kernel path: runs/run_shard_4/inductor_cache/q5/cq52p2qap7uob2ddnn4qeh67r3muutkp3yhbkqpu4eqaemol3idl.py
# Topologically Sorted Source Nodes: [loc_img], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# loc_img => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
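# Added commentary: the matmul for fc21 is issued without its bias (see the
# extern_kernels.mm call in call() below), so this kernel fuses the
# per-feature bias add with the sigmoid and the (64, 4) output is read and
# written only once more.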
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.softplus]
stream0 = get_raw_stream(0)
triton_poi_fused_softplus_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [loc_img], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self, z_dim, hidden_dim, input_dim):
super().__init__()
self.fc1 = nn.Linear(z_dim, hidden_dim)
self.fc21 = nn.Linear(hidden_dim, input_dim)
self.softplus = nn.Softplus()
self.sigmoid = nn.Sigmoid()
def forward(self, z):
hidden = self.softplus(self.fc1(z))
loc_img = self.sigmoid(self.fc21(hidden))
return loc_img
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'z_dim': 4, 'hidden_dim': 4, 'input_dim': 4}]
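# Minimal reference sketch (hypothetical helpers, mirroring
# triton_poi_fused_softplus_0): nn.Softplus with the default beta=1,
# threshold=20 falls back to the identity for large inputs so exp() never
# overflows.
def _softplus_ref(x: 'torch.Tensor') -> 'torch.Tensor':
    return torch.where(x > 20.0, x, torch.log1p(torch.exp(x)))

def _check_softplus_ref():
    z = torch.randn(64) * 30.0
    assert torch.allclose(_softplus_ref(z), torch.nn.functional.softplus(z),
        atol=1e-6)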
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_softplus_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_sigmoid_1[grid(256)](buf3, primals_5, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_4
class DecoderNew(nn.Module):
def __init__(self, z_dim, hidden_dim, input_dim):
super().__init__()
self.fc1 = nn.Linear(z_dim, hidden_dim)
self.fc21 = nn.Linear(hidden_dim, input_dim)
self.softplus = nn.Softplus()
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc21.weight
primals_5 = self.fc21.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
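# Hypothetical smoke test (not part of the generated file): with shared
# weights, DecoderNew should match the eager Decoder defined above up to
# floating-point reassociation:
#
#     eager, compiled = Decoder(4, 4, 4), DecoderNew(4, 4, 4)
#     compiled.load_state_dict(eager.state_dict())
#     z = torch.rand(4, 4, 4, 4, device='cuda')
#     torch.testing.assert_close(compiled(z), eager(z))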
| einbandi/samplednn | Decoder | false | 6,643 | [
"MIT"
] | 1 | 3525e46ab5096a569dde40e5a10d6ee05128ec7d | https://github.com/einbandi/samplednn/tree/3525e46ab5096a569dde40e5a10d6ee05128ec7d | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, z_dim, hidden_dim, input_dim):
super().__init__()
self.fc1 = nn.Linear(z_dim, hidden_dim)
self.fc21 = nn.Linear(hidden_dim, input_dim)
self.softplus = nn.Softplus()
self.sigmoid = nn.Sigmoid()
def forward(self, z):
hidden = self.softplus(self.fc1(z))
loc_img = self.sigmoid(self.fc21(hidden))
return loc_img
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
AuxiliaryConvolutions | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/k3/ck32qkbu76goin6gngorb46frxtcgido7u4gqqjikn6bs3l76qke.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = (yindex // 1024)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None)
tl.store(out_ptr0 + (y0 + (1024*x2) + (4194304*y1)), tmp0, None)
''', device_str='cuda')
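# Added commentary: this kernel repacks the NCHW input (4, 1024, 64, 64) into
# channels-last memory format (strides (4194304, 1, 65536, 1024), see buf0 in
# call() below), so the 1x1 convolution that follows reads a contiguous
# 1024-channel vector per pixel.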
# kernel path: runs/run_shard_4/inductor_cache/iw/ciw5fne4c4ykscbegdmm3uvzowo3xwefv4ro2tovkicwghjx4kku.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 131072
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
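# Added commentary: triton_poi_fused_1 (and triton_poi_fused_2 below) applies
# the same channels-last repack to the 3x3 conv weights, e.g. turning the
# contiguous (512, 256, 3, 3) filter into strides (2304, 1, 768, 256) to match
# the channels-last activations produced by triton_poi_fused_0.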
# kernel path: runs/run_shard_4/inductor_cache/qn/cqnvlz36e5n74qbwjehi6cgr4dntmtxxsduqflrrittcgu3yf256.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dt/cdtzy2sk4ud45h7rfcart7sv7jm567awlymms4zgeznths2wtsqv.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4194304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
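# Added commentary: kernels 3, 5, 7 and 9 share this fused epilogue. Each
# extern_kernels.convolution below is launched with bias=None, so these
# kernels add the per-channel bias and apply ReLU in place, differing only in
# element count and channel modulus (256 here, 128 in the later copies).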
# kernel path: runs/run_shard_4/inductor_cache/ge/cgeoavt4hzgtf4m2q7qssqlp3nld2sgotuhmnrsgchg7mbqmoq6z.py
# Topologically Sorted Source Nodes: [conv2d_1, out_1, conv2d_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# conv2d_2 => convolution_2
# out_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 1024], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 1024
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 512
y1 = (yindex // 512)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (512*x2) + (524288*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x2 + (1024*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (512*x2) + (524288*y1)), tmp4, xmask)
''', device_str='cuda')
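# Added commentary: kernels 4, 6 and 8 write the biased ReLU result twice:
# once in contiguous NCHW layout (out_ptr0; these buffers appear to be kept as
# saved activations or graph outputs) and once in channels-last layout
# (out_ptr1), which feeds the next convolution and is freed immediately after.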
# kernel path: runs/run_shard_4/inductor_cache/7j/c7jq3fq7s3nff7zmqw3tkclugp3t4n7gtljlodonzl7vtd77ccv6.py
# Topologically Sorted Source Nodes: [conv2d_2, out_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# out_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bx/cbx47x2dxzsrue4u24vxstww4bfmxqkborergbnxsewnl2ohqpcj.py
# Topologically Sorted Source Nodes: [conv2d_3, out_3, conv2d_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# conv2d_4 => convolution_4
# out_3 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = (yindex // 256)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (256*x2) + (65536*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x2 + (256*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (256*x2) + (65536*y1)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hx/chxoq3xxsaciy3qpsdg7bzm7yh45vwcakjrprs74f5aqhz23ftak.py
# Topologically Sorted Source Nodes: [conv2d_4, out_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# out_4 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/o4/co4poaxsi3ynmtrayjqpbklkdqtnt7ufei7tynzfqeyxnr34djrs.py
# Topologically Sorted Source Nodes: [conv2d_5, out_5, conv2d_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_5 => convolution_5
# conv2d_6 => convolution_6
# out_5 => relu_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_4, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 196
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = (yindex // 256)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (256*x2) + (50176*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x2 + (196*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (256*x2) + (50176*y1)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/e3/ce3s664utrz3vjali3f3qxxqtzcxam23vzxcz5kt5uaqjjtawb7h.py
# Topologically Sorted Source Nodes: [conv2d_6, out_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_6 => convolution_6
# out_6 => relu_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 100352
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6t/c6tucgktlc6hbc3e47hpkzjzo3jk4qfiy427zrqtws2iag7p6avh.py
# Topologically Sorted Source Nodes: [conv2d_7, conv11_2_feats], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv11_2_feats => relu_7
# conv2d_7 => convolution_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_6, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_10 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 144
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = (yindex // 256)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (256*x2) + (36864*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (144*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (256*x2) + (36864*y1)), tmp6, xmask)
''', device_str='cuda')
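# Added commentary: besides the activation, this last kernel materializes the
# boolean mask (relu_out <= 0) as int1 in out_ptr1; aten.threshold_backward
# reuses it to zero incoming gradients where the ReLU clamped, instead of
# recomputing the forward activation.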
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args
args.clear()
assert_size_stride(primals_1, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
assert_size_stride(primals_4, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (512, ), (1, ))
assert_size_stride(primals_6, (128, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (128, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (128, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_15, (128, ), (1, ))
assert_size_stride(primals_16, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (256, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 1, 65536, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 4096, 4096, grid=grid(4096, 4096), stream=stream0)
del primals_3
buf1 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_4, buf1, 131072, 9, grid=grid(131072, 9), stream=stream0)
del primals_4
buf2 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_8, buf2, 32768, 9, grid=grid(32768, 9), stream=stream0)
del primals_8
buf3 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_12, buf3, 32768, 9, grid=grid(32768, 9), stream=stream0)
del primals_12
buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_16, buf4, 32768, 9, grid=grid(32768, 9), stream=stream0)
del primals_16
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf6, primals_2, 4194304, grid=grid(4194304), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, buf1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 512, 32, 32), (524288, 1, 16384, 512))
buf8 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.float32)
buf9 = empty_strided_cuda((4, 512, 32, 32), (524288, 1, 16384, 512), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, out_1, conv2d_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf7, primals_5, buf8, buf9, 2048, 1024, grid=grid(2048, 1024), stream=stream0)
del buf7
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 32, 32), (131072, 1, 4096, 128))
del buf9
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, out_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf11, primals_7, 524288, grid=grid(524288), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, buf2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf13 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32)
buf14 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_3, out_3, conv2d_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf12, primals_9, buf13, buf14, 1024, 256, grid=grid(1024, 256), stream=stream0)
del buf12
del primals_9
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 128, 16, 16), (32768, 1, 2048, 128))
del buf14
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, out_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf16, primals_11, 131072, grid=grid(131072), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf16, buf3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 14, 14), (50176, 1, 3584, 256))
buf18 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1), torch.float32)
buf19 = empty_strided_cuda((4, 256, 14, 14), (50176, 1, 3584, 256), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_5, out_5, conv2d_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf17, primals_13, buf18, buf19, 1024, 196, grid=grid(1024, 196), stream=stream0)
del buf17
del primals_13
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 14, 14), (25088, 1, 1792, 128))
del buf19
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [conv2d_6, out_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf21, primals_15, 100352, grid=grid(100352), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 256, 12, 12), (36864, 1, 3072, 256))
buf23 = empty_strided_cuda((4, 256, 12, 12), (36864, 144, 12, 1), torch.float32)
buf24 = empty_strided_cuda((4, 256, 12, 12), (36864, 1, 3072, 256), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_7, conv11_2_feats], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_10.run(buf22, primals_17, buf23, buf24, 1024, 144, grid=grid(1024, 144), stream=stream0)
del buf22
del primals_17
return (buf8, buf13, buf18, buf23, primals_1, buf0, buf1, primals_6, buf2, primals_10, buf3, primals_14, buf4, buf6, buf8, buf11, buf13, buf16, buf18, buf21, buf24, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1024, 64, 64), (4194304, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((128, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
from itertools import product as product
import torch.optim
import torch.utils.data
class AuxiliaryConvolutions(nn.Module):
"""
Additional convolutions to produce higher-level feature maps.
"""
def __init__(self):
super(AuxiliaryConvolutions, self).__init__()
self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0)
self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)
self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0)
self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)
self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)
self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)
self.init_conv2d()
def init_conv2d(self):
"""
Initialize convolution parameters.
"""
for c in self.children():
if isinstance(c, nn.Conv2d):
nn.init.xavier_uniform_(c.weight)
nn.init.constant_(c.bias, 0.0)
def forward(self, conv7_feats):
"""
Forward propagation.
:param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
:return: higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2
"""
out = F.relu(self.conv8_1(conv7_feats))
out = F.relu(self.conv8_2(out))
conv8_2_feats = out
out = F.relu(self.conv9_1(out))
out = F.relu(self.conv9_2(out))
conv9_2_feats = out
out = F.relu(self.conv10_1(out))
out = F.relu(self.conv10_2(out))
conv10_2_feats = out
out = F.relu(self.conv11_1(out))
conv11_2_feats = F.relu(self.conv11_2(out))
return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats
def get_inputs():
return [torch.rand([4, 1024, 64, 64])]
def get_init_inputs():
return [[], {}]
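# A minimal standalone sketch (not part of the original module) of the two
# size rules the auxiliary convolutions rely on: stride 2 with padding=1
# halves the spatial grid, and an unpadded 3x3 convolution trims one pixel
# per side, which is how 64x64 becomes 32, 16, 14 and finally 12.
import torch
from torch import nn

x = torch.rand(1, 256, 64, 64)
print(nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)(x).shape)
# torch.Size([1, 512, 32, 32])
y = torch.rand(1, 128, 16, 16)
print(nn.Conv2d(128, 256, kernel_size=3, padding=0)(y).shape)
# torch.Size([1, 256, 14, 14])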
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
from itertools import product as product
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = yindex // 1024
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None)
tl.store(out_ptr0 + (y0 + 1024 * x2 + 4194304 * y1), tmp0, None)
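# The copy kernel above converts the NCHW input to channels-last (NHWC)
# strides: the destination offset y0 + 1024 * x2 + 4194304 * y1 places the
# channel index (y0) innermost, so the extern convolutions below operate on
# a contiguous-channel layout.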
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 1024
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 512
y1 = yindex // 512
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 524288 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x2 + 1024 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 512 * x2 + 524288 * y1), tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 65536 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x2 + 256 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 256 * x2 + 65536 * y1), tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 196
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 50176 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x2 + 196 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 256 * x2 + 50176 * y1), tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
xnumel = 144
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 36864 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 144 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 256 * x2 + 36864 * y1), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17) = args
args.clear()
assert_size_stride(primals_1, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 1024, 64, 64), (4194304, 4096, 64, 1))
assert_size_stride(primals_4, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_5, (512,), (1,))
assert_size_stride(primals_6, (128, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (128, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (128, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (256,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024, 64, 64), (4194304, 1, 65536,
1024), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(4096, 4096)](primals_3, buf0, 4096, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_1[grid(131072, 9)](primals_4, buf1, 131072, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(32768, 9)](primals_8, buf2, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf3 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(32768, 9)](primals_12, buf3, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_12
buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_2[grid(32768, 9)](primals_16, buf4, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_16
buf5 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_3[grid(4194304)](buf6, primals_2,
4194304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf7 = extern_kernels.convolution(buf6, buf1, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 512, 32, 32), (524288, 1, 16384, 512))
buf8 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1),
torch.float32)
buf9 = empty_strided_cuda((4, 512, 32, 32), (524288, 1, 16384, 512),
torch.float32)
triton_poi_fused_convolution_relu_4[grid(2048, 1024)](buf7,
primals_5, buf8, buf9, 2048, 1024, XBLOCK=64, YBLOCK=64,
num_warps=8, num_stages=1)
del buf7
del primals_5
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 32, 32), (131072, 1, 4096, 128))
del buf9
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_5[grid(524288)](buf11, primals_7,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_7
buf12 = extern_kernels.convolution(buf11, buf2, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf13 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
torch.float32)
buf14 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_convolution_relu_6[grid(1024, 256)](buf12,
primals_9, buf13, buf14, 1024, 256, XBLOCK=32, YBLOCK=32,
num_warps=4, num_stages=1)
del buf12
del primals_9
buf15 = extern_kernels.convolution(buf14, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 128, 16, 16), (32768, 1, 2048, 128))
del buf14
buf16 = buf15
del buf15
triton_poi_fused_convolution_relu_7[grid(131072)](buf16, primals_11,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf17 = extern_kernels.convolution(buf16, buf3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 14, 14), (50176, 1, 3584, 256))
buf18 = empty_strided_cuda((4, 256, 14, 14), (50176, 196, 14, 1),
torch.float32)
buf19 = empty_strided_cuda((4, 256, 14, 14), (50176, 1, 3584, 256),
torch.float32)
triton_poi_fused_convolution_relu_8[grid(1024, 196)](buf17,
primals_13, buf18, buf19, 1024, 196, XBLOCK=32, YBLOCK=32,
num_warps=4, num_stages=1)
del buf17
del primals_13
buf20 = extern_kernels.convolution(buf19, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 128, 14, 14), (25088, 1, 1792, 128))
del buf19
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_9[grid(100352)](buf21, primals_15,
100352, XBLOCK=512, num_warps=8, num_stages=1)
del primals_15
buf22 = extern_kernels.convolution(buf21, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 256, 12, 12), (36864, 1, 3072, 256))
buf23 = empty_strided_cuda((4, 256, 12, 12), (36864, 144, 12, 1),
torch.float32)
buf24 = empty_strided_cuda((4, 256, 12, 12), (36864, 1, 3072, 256),
torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_10[grid(1024, 144)](
            buf22, primals_17, buf23, buf24, 1024, 144, XBLOCK=32,
            YBLOCK=32, num_warps=4, num_stages=1)
del buf22
del primals_17
return (buf8, buf13, buf18, buf23, primals_1, buf0, buf1, primals_6,
buf2, primals_10, buf3, primals_14, buf4, buf6, buf8, buf11, buf13,
buf16, buf18, buf21, buf24)
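# A minimal sketch (stock PyTorch, illustrative only): strides such as
# (4194304, 1, 65536, 1024), allocated above for the (4, 1024, 64, 64)
# input, are exactly torch.channels_last (NHWC) memory format.
import torch

t = torch.empty(4, 1024, 64, 64).to(memory_format=torch.channels_last)
print(t.stride())  # (4194304, 1, 65536, 1024)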
class AuxiliaryConvolutionsNew(nn.Module):
"""
Additional convolutions to produce higher-level feature maps.
"""
def __init__(self):
super(AuxiliaryConvolutionsNew, self).__init__()
self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0)
self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)
self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0)
self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)
self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)
self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)
self.init_conv2d()
def init_conv2d(self):
"""
Initialize convolution parameters.
"""
for c in self.children():
if isinstance(c, nn.Conv2d):
nn.init.xavier_uniform_(c.weight)
nn.init.constant_(c.bias, 0.0)
def forward(self, input_0):
primals_1 = self.conv8_1.weight
primals_2 = self.conv8_1.bias
primals_4 = self.conv8_2.weight
primals_5 = self.conv8_2.bias
primals_6 = self.conv9_1.weight
primals_7 = self.conv9_1.bias
primals_8 = self.conv9_2.weight
primals_9 = self.conv9_2.bias
primals_10 = self.conv10_1.weight
primals_11 = self.conv10_1.bias
primals_12 = self.conv10_2.weight
primals_13 = self.conv10_2.bias
primals_14 = self.conv11_1.weight
primals_15 = self.conv11_1.bias
primals_16 = self.conv11_2.weight
primals_17 = self.conv11_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17])
return output[0], output[1], output[2], output[3]
| dee-walia20/SSD-Implementation-using-Pytorch | AuxiliaryConvolutions | false | 6,644 | [
"MIT"
] | 1 | 2a7dcdcea2787f4bffd45f335819f08af2b525dd | https://github.com/dee-walia20/SSD-Implementation-using-Pytorch/tree/2a7dcdcea2787f4bffd45f335819f08af2b525dd | import torch
from torch import nn
import torch.nn.functional as F
from itertools import product as product
import torch.optim
import torch.utils.data
class Model(nn.Module):
"""
Additional convolutions to produce higher-level feature maps.
"""
def __init__(self):
super().__init__()
self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0)
self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)
self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0)
self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)
self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)
self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)
self.init_conv2d()
def init_conv2d(self):
"""
Initialize convolution parameters.
"""
for c in self.children():
if isinstance(c, nn.Conv2d):
nn.init.xavier_uniform_(c.weight)
nn.init.constant_(c.bias, 0.0)
def forward(self, conv7_feats):
"""
Forward propagation.
:param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
:return: higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2
"""
out = F.relu(self.conv8_1(conv7_feats))
out = F.relu(self.conv8_2(out))
conv8_2_feats = out
out = F.relu(self.conv9_1(out))
out = F.relu(self.conv9_2(out))
conv9_2_feats = out
out = F.relu(self.conv10_1(out))
out = F.relu(self.conv10_2(out))
conv10_2_feats = out
out = F.relu(self.conv11_1(out))
conv11_2_feats = F.relu(self.conv11_2(out))
return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats
def get_inputs():
return [torch.rand([4, 1024, 64, 64])]
def get_init_inputs():
return []
|
GELU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/iz/ciz2f2pscrf2tsmzct4hd4myt3fkrqwmv3eh6oduxwelwqmkr4vm.py
# Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# sigmoid => sigmoid
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.702), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %arg0_1), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.702
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp3 * tmp0
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GELU(nn.Module):
def forward(self, x):
return torch.sigmoid(1.702 * x) * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
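# A minimal check (stock PyTorch only; not part of the original module):
# the 1.702-scaled sigmoid gate closely tracks the exact Gaussian-CDF GELU.
import torch
import torch.nn.functional as F

x = torch.linspace(-4.0, 4.0, steps=101)
approx = torch.sigmoid(1.702 * x) * x
print((approx - F.gelu(x)).abs().max())  # small, on the order of 1e-2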
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.702
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp3 * tmp0
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GELUNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| endaaman/augmix | GELU | false | 6,645 | [
"Apache-2.0"
] | 1 | 11c86a126c7b261ca178a715763763ca22b20b81 | https://github.com/endaaman/augmix/tree/11c86a126c7b261ca178a715763763ca22b20b81 | import torch
import torch.nn as nn
class Model(nn.Module):
def forward(self, x):
return torch.sigmoid(1.702 * x) * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Noise_injector | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/e3/ce3gjfeicvychl6kukct6du2o5mgo73k565s2xlolyc6qtdeydpa.py
# Topologically Sorted Source Nodes: [add, add_1, feature_map], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# feature_map => mul
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_6, %view), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, 1e-05), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add_1), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 1e-05
tmp5 = tmp3 + tmp4
tmp6 = tmp2 * tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_7, %primals_8, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, primals_3, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, add_1, feature_map], Original ATen: [aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(primals_6, buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf4, primals_8, 256, grid=grid(256), stream=stream0)
del primals_8
return (buf4, primals_3, primals_6, primals_7, buf0, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def truncated_normal_(tensor, mean=0, std=1):
    # Approximate a truncated standard normal: draw 4 candidates per
    # element, pick one that lands inside (-2, 2), then rescale the
    # result to the requested mean and std in place.
    size = tensor.shape
    tmp = tensor.new_empty(size + (4,)).normal_()
    valid = (tmp < 2) & (tmp > -2)
    ind = valid.max(-1, keepdim=True)[1]
    tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))
    tensor.data.mul_(std).add_(mean)
def init_weights_orthogonal_normal(m):
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
nn.init.orthogonal_(m.weight)
truncated_normal_(m.bias, mean=0, std=0.001)
def weights_init(m):
classname = m.__class__.__name__
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
m.weight.data.normal_(0.0, 0.02)
if classname.find('Linear') != -1:
nn.init.xavier_uniform_(m.weight.data)
m.bias.data.fill_(0)
class Noise_injector(nn.Module):
def __init__(self, n_hidden, z_dim, num_channels, n_channels_out,
device='cpu'):
super(Noise_injector, self).__init__()
self.num_channels = num_channels
self.n_channels_out = n_channels_out
self.n_hidden = n_hidden
self.z_dim = z_dim
self.device = device
self.residual = nn.Linear(self.z_dim, self.n_hidden)
self.scale = nn.Linear(self.z_dim, self.n_hidden)
self.last_layer = nn.Conv2d(self.n_hidden, self.n_channels_out,
kernel_size=1)
self.residual.apply(weights_init)
self.scale.apply(weights_init)
self.last_layer.apply(init_weights_orthogonal_normal)
def forward(self, feature_map, z):
"""
        z is B x z_dim and feature_map is B x C x H x W.
        The projected z is viewed as (B, n_hidden, 1, 1) and broadcast over
        H and W; the behavior matches tf.tile (verified).
"""
residual = self.residual(z).view(z.shape[0], self.n_hidden, 1, 1)
scale = self.scale(z).view(z.shape[0], self.n_hidden, 1, 1)
feature_map = (feature_map + residual) * (scale + 1e-05)
return self.last_layer(feature_map)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_hidden': 4, 'z_dim': 4, 'num_channels': 4,
'n_channels_out': 4}]
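# A minimal broadcasting sketch (toy shapes, not from the original): a
# (B, C, 1, 1) residual/scale pair conditions every spatial position of a
# (B, C, H, W) feature map, mirroring the forward() above.
import torch

B, C, H, W = 2, 4, 8, 8
fmap = torch.randn(B, C, H, W)
res = torch.randn(B, C).view(B, C, 1, 1)
sc = torch.randn(B, C).view(B, C, 1, 1)
out = (fmap + res) * (sc + 1e-05)
assert out.shape == (B, C, H, W)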
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 1e-05
tmp5 = tmp3 + tmp4
tmp6 = tmp2 * tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, primals_3, reinterpret_tensor(
primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_6, buf0, buf1, buf2,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_7, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(256)](buf4, primals_8, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_8
return buf4, primals_3, primals_6, primals_7, buf0, buf1, buf2
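# Note: each extern_kernels.addmm call above computes an nn.Linear layer as
# out = beta * bias + alpha * (input @ weight.T); reinterpret_tensor
# supplies the transposed (4, 4) weight view without copying.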
def truncated_normal_(tensor, mean=0, std=1):
size = tensor.shape
tmp = tensor.new_empty(size + (4,)).normal_()
valid = (tmp < 2) & (tmp > -2)
ind = valid.max(-1, keepdim=True)[1]
tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))
tensor.data.mul_(std).add_(mean)
def init_weights_orthogonal_normal(m):
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
nn.init.orthogonal_(m.weight)
truncated_normal_(m.bias, mean=0, std=0.001)
def weights_init(m):
classname = m.__class__.__name__
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
m.weight.data.normal_(0.0, 0.02)
if classname.find('Linear') != -1:
nn.init.xavier_uniform_(m.weight.data)
m.bias.data.fill_(0)
class Noise_injectorNew(nn.Module):
def __init__(self, n_hidden, z_dim, num_channels, n_channels_out,
device='cpu'):
super(Noise_injectorNew, self).__init__()
self.num_channels = num_channels
self.n_channels_out = n_channels_out
self.n_hidden = n_hidden
self.z_dim = z_dim
self.device = device
self.residual = nn.Linear(self.z_dim, self.n_hidden)
self.scale = nn.Linear(self.z_dim, self.n_hidden)
self.last_layer = nn.Conv2d(self.n_hidden, self.n_channels_out,
kernel_size=1)
self.residual.apply(weights_init)
self.scale.apply(weights_init)
self.last_layer.apply(init_weights_orthogonal_normal)
def forward(self, input_0, input_1):
        primals_1 = self.residual.weight
        primals_2 = self.residual.bias
        primals_4 = self.scale.weight
        primals_5 = self.scale.bias
        primals_7 = self.last_layer.weight
        primals_8 = self.last_layer.bias
        # Trace argument order: z is primals_3, feature_map is primals_6.
        primals_6 = input_0
        primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| dkgupta90/CARMSS | Noise_injector | false | 6,646 | [
"Apache-2.0"
] | 1 | 1f397caa39b9f504951285eff150857f7d86a7c3 | https://github.com/dkgupta90/CARMSS/tree/1f397caa39b9f504951285eff150857f7d86a7c3 | import torch
import torch.nn as nn
def truncated_normal_(tensor, mean=0, std=1):
size = tensor.shape
tmp = tensor.new_empty(size + (4,)).normal_()
valid = (tmp < 2) & (tmp > -2)
ind = valid.max(-1, keepdim=True)[1]
tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))
tensor.data.mul_(std).add_(mean)
def init_weights_orthogonal_normal(m):
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
nn.init.orthogonal_(m.weight)
truncated_normal_(m.bias, mean=0, std=0.001)
def weights_init(m):
classname = m.__class__.__name__
if type(m) == nn.Conv2d or type(m) == nn.ConvTranspose2d:
m.weight.data.normal_(0.0, 0.02)
if classname.find('Linear') != -1:
nn.init.xavier_uniform_(m.weight.data)
m.bias.data.fill_(0)
class Model(nn.Module):
def __init__(self, n_hidden, z_dim, num_channels, n_channels_out,
device='cpu'):
super().__init__()
self.num_channels = num_channels
self.n_channels_out = n_channels_out
self.n_hidden = n_hidden
self.z_dim = z_dim
self.device = device
self.residual = nn.Linear(self.z_dim, self.n_hidden)
self.scale = nn.Linear(self.z_dim, self.n_hidden)
self.last_layer = nn.Conv2d(self.n_hidden, self.n_channels_out,
kernel_size=1)
self.residual.apply(weights_init)
self.scale.apply(weights_init)
self.last_layer.apply(init_weights_orthogonal_normal)
def forward(self, feature_map, z):
"""
        z is B x z_dim and feature_map is B x C x H x W.
        The projected z is viewed as (B, n_hidden, 1, 1) and broadcast over
        H and W; the behavior matches tf.tile (verified).
"""
residual = self.residual(z).view(z.shape[0], self.n_hidden, 1, 1)
scale = self.scale(z).view(z.shape[0], self.n_hidden, 1, 1)
feature_map = (feature_map + residual) * (scale + 1e-05)
return self.last_layer(feature_map)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_hidden': 4, 'z_dim': 4, 'num_channels': 4,
'n_channels_out': 4}]
|
VoxelFeatureExtractor | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/f2/cf2faolme3wnnwp6icjm6dfbqfd7sk3y4me6nfknaftn55eci6dj.py
# Topologically Sorted Source Nodes: [sum_1, truediv], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# sum_1 => sum_1
# truediv => div
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [1]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %view), kwargs = {})
triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, truediv], Original ATen: [aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sum_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class VoxelFeatureExtractor(nn.Module):
"""Computes mean of non-zero points within voxel."""
def forward(self, feature, occupancy):
"""
        :param feature: FloatTensor of shape (N, K, C), zero-padded points per voxel
        :param occupancy: Tensor of shape (N,), the number of valid points per voxel
        :return: FloatTensor of shape (N, C), the per-voxel mean over valid points
"""
denominator = occupancy.type_as(feature).view(-1, 1)
feature = (feature.sum(1) / denominator).contiguous()
return feature
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
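# --- Illustrative sketch (not part of the original repo; the helper name and
# shapes below are assumptions). It shows the forward pass is a masked mean:
# the K point slots are summed per voxel, then divided by the occupancy count.
def _example_voxel_mean():
    feats = torch.tensor([[[2.0, 4.0], [0.0, 0.0]]])  # (N=1, K=2, C=2)
    occupancy = torch.tensor([1])  # only one of the two slots holds a point
    out = VoxelFeatureExtractor()(feats, occupancy)
    # sum over K gives [2., 4.]; dividing by occupancy=1 leaves [2., 4.]
    assert torch.allclose(out, torch.tensor([[2.0, 4.0]]))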
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sum_0[grid(64)](arg1_1, arg0_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class VoxelFeatureExtractorNew(nn.Module):
"""Computes mean of non-zero points within voxel."""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| eraofelix/PV-RCNN | VoxelFeatureExtractor | false | 6,647 | [
"MIT"
] | 1 | 6361ec99cc1c92120263ef56b2c2b003c2cd7264 | https://github.com/eraofelix/PV-RCNN/tree/6361ec99cc1c92120263ef56b2c2b003c2cd7264 | import torch
from torch import nn
class Model(nn.Module):
"""Computes mean of non-zero points within voxel."""
def forward(self, feature, occupancy):
"""
        :param feature: FloatTensor of shape (N, K, C)
        :param occupancy: per-voxel count of non-empty points, shape (N,)
        :return: FloatTensor of shape (N, C)
"""
denominator = occupancy.type_as(feature).view(-1, 1)
feature = (feature.sum(1) / denominator).contiguous()
return feature
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
QModReLU | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/kz/ckzirxsv4jlwhbzbd4ahloxq2qzsker6xqsnj3amgmf53cwqy237.py
# Topologically Sorted Source Nodes: [norm, add, relu, truediv, mul], Original ATen: [aten.linalg_vector_norm, aten.add, aten.relu, aten.div, aten.mul, aten.threshold_backward]
# Source node to ATen node mapping:
# add => add
# mul => mul
# norm => pow_1, pow_2, sum_1
# relu => relu
# truediv => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {})
# %pow_2 : [num_users=3] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, %primals_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %div), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_per_fused_add_div_linalg_vector_norm_mul_relu_threshold_backward_0 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mul_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mul_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mul_relu_threshold_backward_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp6 = tl.load(in_ptr1 + (0))
tmp7 = tl.broadcast_to(tmp6, [1])
tmp13 = tl.broadcast_to(tmp6, [RBLOCK])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp5 = libdevice.sqrt(tmp4)
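    # tmp5 is the global L2 norm of the whole (4, 4, 4, 4) input: a single
    # persistent program reduces all 256 squared elements, so sqrt(sum)
    # here matches x.norm() in the eager module.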
tmp8 = tmp5 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tmp14 = tmp5 + tmp13
tmp15 = triton_helpers.maximum(tmp9, tmp14)
tmp16 = tmp0 / tmp5
tmp17 = tmp15 * tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp5, None)
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
tl.store(out_ptr1 + (tl.broadcast_to(r0, [RBLOCK])), tmp17, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
buf3 = empty_strided_cuda((1, ), (1, ), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, add, relu, truediv, mul], Original ATen: [aten.linalg_vector_norm, aten.add, aten.relu, aten.div, aten.mul, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_linalg_vector_norm_mul_relu_threshold_backward_0.run(buf1, primals_1, primals_2, buf3, buf2, 1, 256, grid=grid(1), stream=stream0)
del primals_2
return (buf2, primals_1, buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.fx
class QModReLU(torch.nn.Module):
"""
    Quaternion ModReLU
"""
def __init__(self, bias=0):
super().__init__()
self.bias = torch.nn.Parameter(torch.Tensor([bias]))
def forward(self, x):
norm = x.norm()
return F.relu(norm + self.bias) * (x / norm)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
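# --- Illustrative sketch (not from the original repo): ModReLU rescales the
# whole tensor by relu(||x|| + b) / ||x||, preserving direction. With b = 0
# the map is the identity for any non-zero input, which we can check directly.
def _example_modrelu():
    x = torch.randn(4, 4, 4, 4)
    y = QModReLU(bias=0.0)(x)
    assert torch.allclose(y, x, atol=1e-6)  # relu(norm) * x / norm == x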
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.fx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mul_relu_threshold_backward_0(
in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + 0)
tmp7 = tl.broadcast_to(tmp6, [1])
tmp13 = tl.broadcast_to(tmp6, [RBLOCK])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp5 = libdevice.sqrt(tmp4)
tmp8 = tmp5 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tmp14 = tmp5 + tmp13
tmp15 = triton_helpers.maximum(tmp9, tmp14)
tmp16 = tmp0 / tmp5
tmp17 = tmp15 * tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None)
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp17, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
buf3 = empty_strided_cuda((1,), (1,), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_linalg_vector_norm_mul_relu_threshold_backward_0[
grid(1)](buf1, primals_1, primals_2, buf3, buf2, 1, 256,
num_warps=2, num_stages=1)
del primals_2
return buf2, primals_1, buf1, buf3
class QModReLUNew(torch.nn.Module):
"""
    Quaternion ModReLU
"""
def __init__(self, bias=0):
super().__init__()
self.bias = torch.nn.Parameter(torch.Tensor([bias]))
def forward(self, input_0):
primals_2 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| eleGAN23/HI2I | QModReLU | false | 6,648 | [
"MIT"
] | 1 | 7730ee0963614290099b011c113048ef6d1b149c | https://github.com/eleGAN23/HI2I/tree/7730ee0963614290099b011c113048ef6d1b149c | import torch
import torch.nn.functional as F
import torch.fx
class Model(torch.nn.Module):
"""
    Quaternion ModReLU
"""
def __init__(self, bias=0):
super().__init__()
self.bias = torch.nn.Parameter(torch.Tensor([bias]))
def forward(self, x):
norm = x.norm()
return F.relu(norm + self.bias) * (x / norm)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
DecoderNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nq/cnqjufcqn3ur3s7xvlb2i747nyf24md4zaiatlwgkasynplfjstu.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
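    # tmp4 is relu(x + bias); tmp6 records where the activation is <= 0 so the
    # backward pass can zero those gradients (the "threshold_backward" mask),
    # which is why this fused forward kernel writes two outputs.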
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kk/ckk6sapqkp4bzozh3ppr4njcop53t2g45ppw7w7w7sovncunc5j6.py
# Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.5), kwargs = {})
triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 + tmp3
tl.store(in_out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 64), (64, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 4096, grid=grid(4096), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 4), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [mul, add], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class DecoderNet(nn.Module):
"""
    The decoder takes an interpolated feature vector and turns it into the
    output signal. This net is intended to be very lightweight; it has only
    one hidden layer.
"""
def __init__(self, feature_size, signal_dimension, hidden_layer_size=64):
"""
@param feature_size dimension of an input feature vector (C)
        @param signal_dimension number of components in the output signal
e.g. 3 or 4 for an image, 1 for a signed
distance field, etc.
@param hidden_layer_size number of neurons in the hidden layer
"""
super().__init__()
self.fc1 = nn.Linear(feature_size, hidden_layer_size)
self.fc2 = nn.Linear(hidden_layer_size, signal_dimension)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x * 0.5 + 0.5
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'feature_size': 4, 'signal_dimension': 4}]
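# --- Illustrative sketch (the shapes and variable names are assumptions):
# the final `x * 0.5 + 0.5` affine recentres a roughly [-1, 1] activation
# range into [0, 1], which suits image-like signals.
def _example_decoder():
    net = DecoderNet(feature_size=4, signal_dimension=3)
    feats = torch.rand(10, 4)  # ten interpolated feature vectors
    signal = net(feats)        # (10, 3), e.g. an RGB signal near [0, 1]
    assert signal.shape == (10, 3)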
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_add_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 + tmp3
tl.store(in_out_ptr0 + x2, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 64), (64, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_2, buf4, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 4), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_mul_1[grid(256)](buf3, primals_5, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), primals_4, buf4
class DecoderNetNew(nn.Module):
"""
    The decoder takes an interpolated feature vector and turns it into the
    output signal. This net is intended to be very lightweight; it has only
    one hidden layer.
"""
def __init__(self, feature_size, signal_dimension, hidden_layer_size=64):
"""
@param feature_size dimension of an input feature vector (C)
        @param signal_dimension number of components in the output signal
e.g. 3 or 4 for an image, 1 for a signed
distance field, etc.
@param hidden_layer_size number of neurons in the hidden layer
"""
super().__init__()
self.fc1 = nn.Linear(feature_size, hidden_layer_size)
self.fc2 = nn.Linear(hidden_layer_size, signal_dimension)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| eliemichel/ReACORN | DecoderNet | false | 6,649 | [
"MIT"
] | 1 | 74501551ecb387352271674efb2ed6240d234df6 | https://github.com/eliemichel/ReACORN/tree/74501551ecb387352271674efb2ed6240d234df6 | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
"""
    The decoder takes an interpolated feature vector and turns it into the
    output signal. This net is intended to be very lightweight; it has only
    one hidden layer.
"""
def __init__(self, feature_size, signal_dimension, hidden_layer_size=64):
"""
@param feature_size dimension of an input feature vector (C)
        @param signal_dimension number of components in the output signal
e.g. 3 or 4 for an image, 1 for a signed
distance field, etc.
@param hidden_layer_size number of neurons in the hidden layer
"""
super().__init__()
self.fc1 = nn.Linear(feature_size, hidden_layer_size)
self.fc2 = nn.Linear(hidden_layer_size, signal_dimension)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
return x * 0.5 + 0.5
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
AlexOutputBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ha/chaktsrgunyq3tj3pn3tjyf7qwqukjk2xfykkt6oxptqoeazrm6d.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_3, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + (x4), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, None)
tl.store(out_ptr0 + (x4), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kx/ckxd2bn7avnmtokgk2tmgkoeg2tv6ciq5vssxjke2uu3k6jgznbu.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.view]
# Source node to ATen node mapping:
# x_3 => view_5
# Graph fragment:
# %view_5 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_3, [64, 4096]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4096
x1 = (xindex // 4096)
x2 = xindex
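    # For these fixed shapes both modular terms in the load below are always
    # zero ((x1 % 4) // 4 == 0 and (4 * (x1 // 4 % 4) + x1 % 4) // 16 == 0),
    # so the indexing reduces to a contiguous copy of element x2.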
tmp0 = tl.load(in_ptr0 + (x0 + (4096*x1) + (16384*((x1 % 4) // 4)) + (65536*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), None)
tl.store(out_ptr0 + (x2), tmp0, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4096, 4), (4, 1))
assert_size_stride(primals_2, (4096, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4096, 4096), (4096, 1))
assert_size_stride(primals_5, (4096, ), (1, ))
assert_size_stride(primals_6, (4, 4096), (4096, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4096), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4096), (65536, 16384, 4096, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4096), (65536, 16384, 4096, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 262144, grid=grid(262144), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 262144, grid=grid(262144), stream=stream0)
buf3 = reinterpret_tensor(buf1, (64, 4096), (4096, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4096, 4096), (1, 4096), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4096), (65536, 16384, 4096, 1), 0); del buf3 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4096), (65536, 16384, 4096, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf4, primals_5, buf7, 262144, grid=grid(262144), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf4, buf5, 262144, grid=grid(262144), stream=stream0)
del buf4
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (4096, 4), (1, 4096), 0), alpha=1, beta=1, out=buf6)
del primals_7
return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf5, primals_6, buf7, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4096, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4096, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class AlexDense(nn.Module):
"""
    AlexNet-specific dense block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self, in_channels, out_channels):
super(AlexDense, self).__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=out_channels)
self.activ = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(p=0.5)
def forward(self, x):
x = self.fc(x)
x = self.activ(x)
x = self.dropout(x)
return x
class AlexOutputBlock(nn.Module):
"""
    AlexNet-specific output block.
Parameters:
----------
in_channels : int
Number of input channels.
classes : int
Number of classification classes.
"""
def __init__(self, in_channels, classes):
super(AlexOutputBlock, self).__init__()
mid_channels = 4096
self.fc1 = AlexDense(in_channels=in_channels, out_channels=mid_channels
)
self.fc2 = AlexDense(in_channels=mid_channels, out_channels=
mid_channels)
self.fc3 = nn.Linear(in_features=mid_channels, out_features=classes)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'classes': 4}]
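# --- Illustrative sketch (the 256*6*6 input size is an assumption taken from
# the classic AlexNet layout, not from this file): the block is the standard
# classifier head, two dropout-regularised 4096-wide layers plus a projection.
def _example_head():
    head = AlexOutputBlock(in_channels=256 * 6 * 6, classes=1000)
    x = torch.rand(2, 256 * 6 * 6)  # a batch of flattened conv features
    logits = head(x)
    assert logits.shape == (2, 1000)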
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 4096
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, None)
tl.store(out_ptr0 + x4, tmp6, None)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4096
x1 = xindex // 4096
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 16384 * (x1 % 4 // 4) +
65536 * ((4 * (x1 // 4 % 4) + x1 % 4) // 16)), None)
tl.store(out_ptr0 + x2, tmp0, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4096, 4), (4, 1))
assert_size_stride(primals_2, (4096,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4096, 4096), (4096, 1))
assert_size_stride(primals_5, (4096,), (1,))
assert_size_stride(primals_6, (4, 4096), (4096, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4096), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4096), (65536, 16384,
4096, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 4096), (65536, 16384, 4096, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(262144)](buf1,
primals_2, buf8, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
triton_poi_fused_view_1[grid(262144)](buf1, buf2, 262144, XBLOCK=
512, num_warps=8, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4096), (4096, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4096, 4096),
(1, 4096), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4096), (65536, 16384,
4096, 1), 0)
del buf3
buf7 = empty_strided_cuda((4, 4, 4, 4096), (65536, 16384, 4096, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(262144)](buf4,
primals_5, buf7, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 4096), (4096, 1), torch.float32)
triton_poi_fused_view_1[grid(262144)](buf4, buf5, 262144, XBLOCK=
512, num_warps=8, num_stages=1)
del buf4
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6,
(4096, 4), (1, 4096), 0), alpha=1, beta=1, out=buf6)
del primals_7
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, buf5, primals_6, buf7, primals_4, buf8
class AlexDense(nn.Module):
"""
    AlexNet-specific dense block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self, in_channels, out_channels):
super(AlexDense, self).__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=out_channels)
self.activ = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(p=0.5)
def forward(self, x):
x = self.fc(x)
x = self.activ(x)
x = self.dropout(x)
return x
class AlexOutputBlockNew(nn.Module):
"""
    AlexNet-specific output block.
Parameters:
----------
in_channels : int
Number of input channels.
classes : int
Number of classification classes.
"""
def __init__(self, in_channels, classes):
super(AlexOutputBlockNew, self).__init__()
mid_channels = 4096
self.fc1 = AlexDense(in_channels=in_channels, out_channels=mid_channels
)
self.fc2 = AlexDense(in_channels=mid_channels, out_channels=
mid_channels)
self.fc3 = nn.Linear(in_features=mid_channels, out_features=classes)
def forward(self, input_0):
primals_1 = self.fc1.fc.weight
primals_2 = self.fc1.fc.bias
primals_4 = self.fc2.fc.weight
primals_5 = self.fc2.fc.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| earhian/imgclsmob | AlexOutputBlock | false | 6,650 | [
"MIT"
] | 1 | c87c0942420876941868c016211073dec4392e4d | https://github.com/earhian/imgclsmob/tree/c87c0942420876941868c016211073dec4392e4d | import torch
import torch.nn as nn
import torch.utils.data
class AlexDense(nn.Module):
"""
    AlexNet-specific dense block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.fc = nn.Linear(in_features=in_channels, out_features=out_channels)
self.activ = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(p=0.5)
def forward(self, x):
x = self.fc(x)
x = self.activ(x)
x = self.dropout(x)
return x
class Model(nn.Module):
"""
    AlexNet-specific output block.
Parameters:
----------
in_channels : int
Number of input channels.
classes : int
Number of classification classes.
"""
def __init__(self, in_channels, classes):
super().__init__()
mid_channels = 4096
self.fc1 = AlexDense(in_channels=in_channels, out_channels=mid_channels
)
self.fc2 = AlexDense(in_channels=mid_channels, out_channels=
mid_channels)
self.fc3 = nn.Linear(in_features=mid_channels, out_features=classes)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SelfAttentionLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [Q], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# Q => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/eh/ceheq5ns3kg3p6tebb47gdy475c5v4keklf245jl4fgbpvugznm5.py
# Topologically Sorted Source Nodes: [attention_mat_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_mat_1 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_6, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
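    # Numerically stable softmax, first half: each length-4 row is reduced to
    # its max, the max is subtracted, the result is scaled by 0.5 (the graph
    # divides the logits by 2.0, consistent with 1/sqrt(d_k) for d_k = 4),
    # and exponentiated; the normalising division by the row sum happens in
    # triton_poi_fused__softmax_2 below.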
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [attention_mat_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_mat_1 => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ck/cckqpocvlv6hl7nn6zons2xult3w2niv265o4qj66g4363uu6q3f.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_9), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_out_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [Q], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf2, primals_3, buf10, 256, grid=grid(256), stream=stream0)
del primals_3
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [K], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf9, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_mat_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attention_mat_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [new_values], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf8, primals_1, 256, grid=grid(256), stream=stream0)
return (buf8, buf6, primals_1, buf6, reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), buf9, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class SelfAttentionLayer(nn.Module):
def __init__(self, elem_size, embd_size):
super(SelfAttentionLayer, self).__init__()
self.embd_size = embd_size
self.query_lin = nn.Linear(elem_size, embd_size)
self.key_lin = nn.Linear(elem_size, embd_size)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
if len(x.shape) == 3:
x = x.unsqueeze(1)
_N, _num_patches, _seq_size, elem_size = x.shape
Q = F.relu(self.query_lin(x))
K = F.relu(self.key_lin(x))
attention_mat = torch.matmul(Q, K.permute(0, 1, 3, 2)) / math.sqrt(
elem_size)
attention_mat = F.softmax(attention_mat, dim=-1)
new_values = torch.matmul(attention_mat, x)
out = new_values
out = x + out
return out, attention_mat
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'elem_size': 4, 'embd_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
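# Fused bias-add + ReLU that also emits the (activation <= 0) mask consumed
# by autograd's threshold_backward in the backward pass.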
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
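# Softmax pass 1 over rows of length 4: subtract the row max, then scale by
# 0.5 before exp -- this folds in the 1/sqrt(elem_size) attention scaling
# (elem_size = 4) from the original forward.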
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
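# Softmax pass 2: normalize each exponentiated entry by its row sum.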
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
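# Residual connection out = x + new_values, accumulated in place into buf8.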
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2,
primals_3, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0),
out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf7
triton_poi_fused_add_3[grid(256)](buf8, primals_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
    return (buf8, buf6, primals_1, buf6,
        reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), buf9, buf10)
class SelfAttentionLayerNew(nn.Module):
def __init__(self, elem_size, embd_size):
super(SelfAttentionLayerNew, self).__init__()
self.embd_size = embd_size
self.query_lin = nn.Linear(elem_size, embd_size)
self.key_lin = nn.Linear(elem_size, embd_size)
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0):
primals_2 = self.query_lin.weight
primals_3 = self.query_lin.bias
primals_4 = self.key_lin.weight
primals_5 = self.key_lin.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| eldadp100/The-Mesh-Transformer | SelfAttentionLayer | false | 6,651 | [
"MIT"
] | 1 | b3ab18f774251feff1093040dfdcf7b836a43505 | https://github.com/eldadp100/The-Mesh-Transformer/tree/b3ab18f774251feff1093040dfdcf7b836a43505 | import math
import torch
import torch.utils.data
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, elem_size, embd_size):
super().__init__()
self.embd_size = embd_size
self.query_lin = nn.Linear(elem_size, embd_size)
self.key_lin = nn.Linear(elem_size, embd_size)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
if len(x.shape) == 3:
x = x.unsqueeze(1)
_N, _num_patches, _seq_size, elem_size = x.shape
Q = F.relu(self.query_lin(x))
K = F.relu(self.key_lin(x))
attention_mat = torch.matmul(Q, K.permute(0, 1, 3, 2)) / math.sqrt(
elem_size)
attention_mat = F.softmax(attention_mat, dim=-1)
new_values = torch.matmul(attention_mat, x)
out = new_values
out = x + out
return out, attention_mat
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
BatchNorm2D_noparam | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/s5/cs5mrq32xe5ctrjdbcb5uh2wonrkxz3ou53magxshq5nm25mqctn.py
# Topologically Sorted Source Nodes: [mean, var], Original ATen: [aten.mean, aten.var]
# Source node to ATen node mapping:
# mean => mean
# var => var
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0, 2, 3], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%arg0_1, [0, 2, 3]), kwargs = {correction: 1, keepdim: True})
triton_per_fused_mean_var_0 = async_compile.triton('triton_per_fused_mean_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_var_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = (rindex // 16)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0) + (64*r2)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr1 + (x0), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hf/chfnmxtf7xbehzb3iwql3va3ft3xowfrrc3zkzeujngvd6kjabnc.py
# Topologically Sorted Source Nodes: [mean, sub, var, add, out], Original ATen: [aten.mean, aten.sub, aten.var, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# mean => mean
# out => div
# sub => sub
# var => var
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0, 2, 3], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%arg0_1, [0, 2, 3]), kwargs = {correction: 1, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-08), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
triton_poi_fused_add_div_mean_sub_var_1 = async_compile.triton('triton_poi_fused_add_div_mean_sub_var_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_sub_var_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_sub_var_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = 64.0
tmp3 = tmp1 / tmp2
tmp4 = tmp0 - tmp3
tmp6 = 63.0
tmp7 = tmp5 / tmp6
tmp8 = 1e-08
tmp9 = tmp7 + tmp8
tmp10 = tmp4 / tmp9
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf2 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [mean, var], Original ATen: [aten.mean, aten.var]
stream0 = get_raw_stream(0)
triton_per_fused_mean_var_0.run(arg0_1, buf0, buf2, 4, 64, grid=grid(4), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, sub, var, add, out], Original ATen: [aten.mean, aten.sub, aten.var, aten.add, aten.div]
triton_poi_fused_add_div_mean_sub_var_1.run(arg0_1, buf0, buf2, buf4, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf0
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BatchNorm2D_noparam(nn.Module):
def __init__(self, eps=1e-08):
super(BatchNorm2D_noparam, self).__init__()
self.eps = eps
def forward(self, x):
_bs, _c, _h, _w = x.shape
mean = torch.mean(x, (0, 2, 3), keepdim=True)
var = torch.var(x, (0, 2, 3), keepdim=True)
out = (x - mean) / (var + self.eps)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
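# Per-channel reduction over the N*H*W = 64 positions: writes the raw sum and
# the sum of squared deviations; the divisions into mean/variance happen in
# the elementwise kernel below.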
@triton.jit
def triton_per_fused_mean_var_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = rindex // 16
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp18, xmask)
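# Elementwise normalization: mean = sum / 64, unbiased var = squared-dev sum
# / 63 (correction=1), then out = (x - mean) / (var + eps). Note the division
# is by the variance plus eps, not by a standard deviation.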
@triton.jit
def triton_poi_fused_add_div_mean_sub_var_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = 64.0
tmp3 = tmp1 / tmp2
tmp4 = tmp0 - tmp3
tmp6 = 63.0
tmp7 = tmp5 / tmp6
tmp8 = 1e-08
tmp9 = tmp7 + tmp8
tmp10 = tmp4 / tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
buf2 = empty_strided_cuda((1, 4, 1, 1), (4, 1, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_var_0[grid(4)](arg0_1, buf0, buf2, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_sub_var_1[grid(256)](arg0_1, buf0,
buf2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del buf0
del buf2
return buf4,
class BatchNorm2D_noparamNew(nn.Module):
def __init__(self, eps=1e-08):
super(BatchNorm2D_noparamNew, self).__init__()
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ericlearning/General-I2I | BatchNorm2D_noparam | false | 6,652 | [
"MIT"
] | 1 | ba7c5d6a582bdf2e7b53c0e20c31e9097b1883a9 | https://github.com/ericlearning/General-I2I/tree/ba7c5d6a582bdf2e7b53c0e20c31e9097b1883a9 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, eps=1e-08):
super().__init__()
self.eps = eps
def forward(self, x):
_bs, _c, _h, _w = x.shape
mean = torch.mean(x, (0, 2, 3), keepdim=True)
var = torch.var(x, (0, 2, 3), keepdim=True)
out = (x - mean) / (var + self.eps)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CReLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/on/condzuwaooyuptwsw6dxafaur66nfgbtr43qtbz6jnc7qpkefima.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %relu_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr0 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp10 & xmask, other=0.0)
tmp14 = -tmp13
tmp15 = triton_helpers.maximum(tmp6, tmp14)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tl.store(out_ptr0 + (x3), tmp18, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CReLU(nn.ReLU):
def __init__(self):
super(CReLU, self).__init__()
def forward(self, input):
        return torch.cat((F.relu(input, self.inplace),
            F.relu(-input, self.inplace)), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
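# Writes cat([relu(x), relu(-x)], dim=1) in a single pass: output channels
# 0-3 take relu(x), channels 4-7 take relu(-x) of the matching input channel.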
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tl.full([1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
    tmp13 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp10 & xmask, other=0.0)
tmp14 = -tmp13
tmp15 = triton_helpers.maximum(tmp6, tmp14)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tl.store(out_ptr0 + x3, tmp18, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class CReLUNew(nn.ReLU):
def __init__(self):
super(CReLUNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ethancaballero/multi-agent-reinforcement-learning-for-emergent-communication | CReLU | false | 6,653 | [
"MIT"
] | 1 | 426edaa1ee58b467dfc0f46fe1f83ceea26f2ed7 | https://github.com/ethancaballero/multi-agent-reinforcement-learning-for-emergent-communication/tree/426edaa1ee58b467dfc0f46fe1f83ceea26f2ed7 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.ReLU):
def __init__(self):
super().__init__()
def forward(self, input):
        return torch.cat((F.relu(input, self.inplace),
            F.relu(-input, self.inplace)), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LigthSpeechLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ox/coxh4zxlwehvc7dn52ckdepn62ln2jmlbsk5mwd7tp7xur2tywk6.py
# Topologically Sorted Source Nodes: [mel_loss], Original ATen: [aten.mse_loss]
# Source node to ATen node mapping:
# mel_loss => mean, pow_1, sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_mse_loss_0 = async_compile.triton('triton_per_fused_mse_loss_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bq/cbqlvx3fqrosdwrna6ssfaprt3lceueeyt3u5o4low2mfdmh2ur6.py
# Topologically Sorted Source Nodes: [similarity_loss], Original ATen: [aten.sub, aten.abs, aten.mean]
# Source node to ATen node mapping:
# similarity_loss => abs_1, mean_1, sub_1
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg2_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
triton_per_fused_abs_mean_sub_1 = async_compile.triton('triton_per_fused_abs_mean_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_mean_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mel_loss], Original ATen: [aten.mse_loss]
stream0 = get_raw_stream(0)
triton_per_fused_mse_loss_0.run(buf3, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf4 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [similarity_loss], Original ATen: [aten.sub, aten.abs, aten.mean]
triton_per_fused_abs_mean_sub_1.run(buf4, arg3_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg2_1
del arg3_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf5 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [duration_loss], Original ATen: [aten.sub, aten.abs, aten.mean]
triton_per_fused_abs_mean_sub_1.run(buf5, arg5_1, arg4_1, 1, 256, grid=grid(1), stream=stream0)
del arg4_1
del arg5_1
return (buf3, buf4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg5_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
class LigthSpeechLoss(nn.Module):
""" LigthSpeech Loss """
def __init__(self):
super(LigthSpeechLoss, self).__init__()
def forward(self, mel, padd_predicted, cemb_out, mel_tac2_target, D, cemb):
mel_loss = nn.MSELoss()(mel, mel_tac2_target)
similarity_loss = nn.L1Loss()(cemb_out, cemb)
duration_loss = nn.L1Loss()(padd_predicted, D.float())
return mel_loss, similarity_loss, duration_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
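# Mean-squared-error reduction over all 256 elements in a single program.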
@triton.jit
def triton_per_fused_mse_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
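# Mean-absolute-error (L1) reduction; launched twice below, once for the
# similarity loss and once for the duration loss.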
@triton.jit
def triton_per_fused_abs_mean_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_0[grid(1)](buf3, arg1_1, arg0_1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf4 = buf1
del buf1
triton_per_fused_abs_mean_sub_1[grid(1)](buf4, arg3_1, arg2_1, 1,
256, num_warps=2, num_stages=1)
del arg2_1
del arg3_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf5 = buf2
del buf2
triton_per_fused_abs_mean_sub_1[grid(1)](buf5, arg5_1, arg4_1, 1,
256, num_warps=2, num_stages=1)
del arg4_1
del arg5_1
return buf3, buf4, buf5
class LigthSpeechLossNew(nn.Module):
""" LigthSpeech Loss """
def __init__(self):
super(LigthSpeechLossNew, self).__init__()
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
arg4_1 = input_4
arg5_1 = input_5
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
return output[0], output[1], output[2]
| entn-at/LightSpeech | LigthSpeechLoss | false | 6,654 | [
"MIT"
] | 1 | 48250fbcede4b258ba13ab17e3e83afc5fe85a01 | https://github.com/entn-at/LightSpeech/tree/48250fbcede4b258ba13ab17e3e83afc5fe85a01 | import torch
from torch import nn
import torch.utils.data
class Model(nn.Module):
""" LigthSpeech Loss """
def __init__(self):
super().__init__()
def forward(self, mel, padd_predicted, cemb_out, mel_tac2_target, D, cemb):
mel_loss = nn.MSELoss()(mel, mel_tac2_target)
similarity_loss = nn.L1Loss()(cemb_out, cemb)
duration_loss = nn.L1Loss()(padd_predicted, D.float())
return mel_loss, similarity_loss, duration_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/7z/c7zwaenh5um3ia5rjbsf6yda6kmvekfkarouguqg5q2uipjuvhoj.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 143840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3596) % 10
x0 = xindex % 3596
x4 = (xindex // 3596)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
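    # Store with a padded channel stride (3616 vs. 3596 contiguous elements),
    # presumably Inductor's memory-planning alignment for the pooling kernel.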
tl.store(out_ptr0 + (x0 + (3616*x4)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nk/cnkd4znkwsgchhsdj3fjtvdgjug5wuch2mqqtgprce62bxpst5sh.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 46400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = (xindex // 20) % 58
x2 = (xindex // 1160)
x3 = xindex % 1160
tmp0 = tl.load(in_ptr0 + ((3*x0) + (62*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (3*x0) + (62*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (3*x0) + (62*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 > tmp0
tmp6 = tl.full([1], 1, tl.int8)
tmp7 = tl.full([1], 0, tl.int8)
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp9 = tmp3 > tmp2
tmp10 = tl.full([1], 2, tl.int8)
tmp11 = tl.where(tmp9, tmp10, tmp8)
tl.store(out_ptr0 + (x3 + (1184*x2)), tmp4, xmask)
tl.store(out_ptr1 + (x3 + (1280*x2)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7g/c7gmp6e24wfbdcnqvbuzbbu755fkwoq2swpsqrebpsh4rp3tjwzt.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 1008) % 20
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
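# Reference sketch (illustrative, not part of the generated module's execution
# path): the fused kernel above applies the per-channel bias add and the ReLU
# from the graph fragment in a single pass over the convolution output, with
# x1 = xindex // 1008 % 20 selecting the output channel for the bias load.
def _reference_conv_bias_relu(conv_out, bias):
    # conv_out: (N, C, H, W) bias-free convolution result; bias: (C,)
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))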
# kernel path: runs/run_shard_4/inductor_cache/g6/cg6r6dike23cyqqwfqegj6rcuzzn7pmc2rblhy7luc6uem6fyrc7.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [1, 3], [1, 3], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 26880
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 6720
x1 = (xindex // 6720)
tmp0 = tl.load(in_ptr0 + (3*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (3*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (3*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tl.store(out_ptr0 + (x0 + (6784*x1)), tmp10, xmask)
tl.store(out_ptr1 + (x2), tmp11, xmask)
''', device_str='cuda')
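# Reference sketch (illustrative): the two max-pool kernels above compute a
# (1, 3) window max and store the winning in-window offset (0, 1, or 2) as
# int8 — mirroring max_pool2d with return_indices=True, except the recorded
# indices are window-local offsets rather than flat input indices.
def _reference_max_pool_1x3(x):
    # x: (N, C, H, W) with W divisible by 3
    windows = x.unfold(3, 3, 3)            # (N, C, H, W // 3, 3)
    values, offsets = windows.max(dim=-1)  # offsets lie in {0, 1, 2}
    return values, offsets.to(torch.int8)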
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (10, 3, 7, 3), (63, 21, 3, 1))
assert_size_stride(primals_2, (10, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 3, 3), (90, 9, 3, 1))
assert_size_stride(primals_5, (20, ), (1, ))
assert_size_stride(primals_6, (100, 1120), (1120, 1))
assert_size_stride(primals_7, (100, ), (1, ))
assert_size_stride(primals_8, (1, 100), (100, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 58, 62), (35960, 3596, 62, 1))
buf1 = empty_strided_cuda((4, 10, 58, 62), (36160, 3616, 62, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 143840, grid=grid(143840), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 10, 58, 20), (11840, 1184, 20, 1), torch.float32)
buf3 = empty_strided_cuda((4, 10, 58, 20), (12800, 1280, 20, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 46400, grid=grid(46400), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 20, 56, 18), (20160, 1008, 18, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 80640, grid=grid(80640), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 20, 56, 6), (6784, 336, 6, 1), torch.int8)
buf7 = empty_strided_cuda((4, 20, 56, 6), (6720, 336, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 26880, grid=grid(26880), stream=stream0)
buf8 = empty_strided_cuda((24, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf7, (24, 1120), (1, 24), 0), reinterpret_tensor(primals_6, (1120, 100), (1, 1120), 0), alpha=1, beta=1, out=buf8)
del primals_7
buf10 = empty_strided_cuda((24, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8, (100, 1), (1, 100), 0), alpha=1, beta=1, out=buf10)
del primals_9
return (reinterpret_tensor(buf10, (24, ), (1, ), 0), primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (24, 1120), (1, 24), 0), reinterpret_tensor(primals_6, (1120, 100), (1, 1120), 0), buf8, primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((10, 3, 7, 3), (63, 21, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 10, 3, 3), (90, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((100, 1120), (1120, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=10, kernel_size=
(7, 3))
self.pool = nn.MaxPool2d(kernel_size=(1, 3))
self.conv2 = nn.Conv2d(in_channels=10, out_channels=20, kernel_size
=(3, 3))
self.fc1 = nn.Linear(7 * 8 * 20, 100)
self.fc2 = nn.Linear(100, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(7 * 8 * 20, -1)
x = x.permute(1, 0)
x = self.fc1(x)
x = self.fc2(x)
x = torch.squeeze(x)
return x
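# Shape walkthrough for the forward above with the (4, 3, 64, 64) input from
# get_inputs (values match the buffer asserts in the compiled code):
#   conv1 (7x3) -> (4, 10, 58, 62); pool (1, 3) -> (4, 10, 58, 20)
#   conv2 (3x3) -> (4, 20, 56, 18); pool (1, 3) -> (4, 20, 56, 6)
#   view(1120, -1) -> (1120, 24); permute(1, 0) -> (24, 1120), so fc1/fc2 see
#   24 rows (which interleave the 4 batch elements) rather than a batch of 4.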
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 143840
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3596 % 10
x0 = xindex % 3596
x4 = xindex // 3596
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 46400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = xindex // 20 % 58
x2 = xindex // 1160
x3 = xindex % 1160
tmp0 = tl.load(in_ptr0 + (3 * x0 + 62 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 3 * x0 + 62 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 3 * x0 + 62 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 > tmp0
tmp6 = tl.full([1], 1, tl.int8)
tmp7 = tl.full([1], 0, tl.int8)
tmp8 = tl.where(tmp5, tmp6, tmp7)
tmp9 = tmp3 > tmp2
tmp10 = tl.full([1], 2, tl.int8)
tmp11 = tl.where(tmp9, tmp10, tmp8)
tl.store(out_ptr0 + (x3 + 1184 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1280 * x2), tmp11, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 80640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 1008 % 20
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 26880
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 6720
x1 = xindex // 6720
tmp0 = tl.load(in_ptr0 + 3 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 3 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 3 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tl.store(out_ptr0 + (x0 + 6784 * x1), tmp10, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (10, 3, 7, 3), (63, 21, 3, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (20, 10, 3, 3), (90, 9, 3, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (100, 1120), (1120, 1))
assert_size_stride(primals_7, (100,), (1,))
assert_size_stride(primals_8, (1, 100), (100, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 58, 62), (35960, 3596, 62, 1))
buf1 = empty_strided_cuda((4, 10, 58, 62), (36160, 3616, 62, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(143840)](buf0, primals_2,
buf1, 143840, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 10, 58, 20), (11840, 1184, 20, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 10, 58, 20), (12800, 1280, 20, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(46400)](buf1, buf2,
buf3, 46400, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 20, 56, 18), (20160, 1008, 18, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(80640)](buf5, primals_5,
80640, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
        buf6 = empty_strided_cuda((4, 20, 56, 6), (6784, 336, 6, 1), torch.int8)
        buf7 = empty_strided_cuda((4, 20, 56, 6), (6720, 336, 6, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(26880)](buf5, buf6,
buf7, 26880, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((24, 100), (100, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf7, (24, 1120),
(1, 24), 0), reinterpret_tensor(primals_6, (1120, 100), (1,
1120), 0), alpha=1, beta=1, out=buf8)
del primals_7
buf10 = empty_strided_cuda((24, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8,
(100, 1), (1, 100), 0), alpha=1, beta=1, out=buf10)
del primals_9
return (reinterpret_tensor(buf10, (24,), (1,), 0), primals_1, primals_3,
primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (
24, 1120), (1, 24), 0), reinterpret_tensor(primals_6, (1120, 100),
(1, 1120), 0), buf8, primals_8)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=10, kernel_size=
(7, 3))
self.pool = nn.MaxPool2d(kernel_size=(1, 3))
self.conv2 = nn.Conv2d(in_channels=10, out_channels=20, kernel_size
=(3, 3))
self.fc1 = nn.Linear(7 * 8 * 20, 100)
self.fc2 = nn.Linear(100, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| elliottwaissbluth/tensor-hero | Net | false | 6,655 | [
"MIT"
] | 1 | be99ca4380a5ec59c0826e5fc8a87ec0f8956201 | https://github.com/elliottwaissbluth/tensor-hero/tree/be99ca4380a5ec59c0826e5fc8a87ec0f8956201 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=10, kernel_size=
(7, 3))
self.pool = nn.MaxPool2d(kernel_size=(1, 3))
self.conv2 = nn.Conv2d(in_channels=10, out_channels=20, kernel_size
=(3, 3))
self.fc1 = nn.Linear(7 * 8 * 20, 100)
self.fc2 = nn.Linear(100, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(7 * 8 * 20, -1)
x = x.permute(1, 0)
x = self.fc1(x)
x = self.fc2(x)
x = torch.squeeze(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
GaussianSample | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/7i/c7iyomkiv6weojw56i3vrcxxd22xw54hnvrnd7whiojwbn4z4p3b.py
# Topologically Sorted Source Nodes: [mul, std, z], Original ATen: [aten.mul, aten.exp, aten.addcmul]
# Source node to ATen node mapping:
# mul => mul
# std => exp
# z => add, mul_1, mul_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.5), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %randn), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_2), kwargs = {})
triton_poi_fused_addcmul_exp_mul_0 = async_compile.triton('triton_poi_fused_addcmul_exp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_addcmul_exp_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_addcmul_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tl_math.exp(tmp3)
tmp5 = 1.0
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp9 = tmp0 + tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
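# The kernel above fuses the graph fragment listed before it. A minimal eager
# sketch of the same computation (variable names are illustrative):
#     std = torch.exp(log_var * 0.5)
#     z = mu + (std * 1.0) * epsilon   # the "* 1.0" mirrors %mul_1 with value 1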
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_var], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
# Topologically Sorted Source Nodes: [epsilon], Original ATen: [aten.randn]
buf2 = torch.ops.aten.randn.default([4, 4, 4, 4], device=device(type='cuda', index=0), pin_memory=False)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, std, z], Original ATen: [aten.mul, aten.exp, aten.addcmul]
stream0 = get_raw_stream(0)
triton_poi_fused_addcmul_exp_mul_0.run(buf0, buf1, buf3, buf4, 256, grid=grid(256), stream=stream0)
return (buf4, reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Stochastic(nn.Module):
"""
Base stochastic layer that uses the
reparametrization trick [Kingma 2013]
to draw a sample from a distribution
parametrised by mu and log_var.
"""
def reparametrize(self, mu, logvar):
epsilon = torch.randn(mu.size(), requires_grad=False, device=mu.device)
std = logvar.mul(0.5).exp_()
z = mu.addcmul(std, epsilon)
return z
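# Illustrative self-check (an added sketch, not part of the original module):
# addcmul(std, eps) computes mu + std * eps, i.e. the reparametrized sample.
def _check_reparametrize(mu, logvar):
    eps = torch.randn_like(mu)
    std = logvar.mul(0.5).exp()
    return torch.allclose(mu.addcmul(std, eps), mu + std * eps)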
class GaussianSample(Stochastic):
"""
Layer that represents a sample from a
Gaussian distribution.
"""
def __init__(self, in_features, out_features):
super(GaussianSample, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.mu = nn.Linear(in_features, out_features)
self.log_var = nn.Linear(in_features, out_features)
def forward(self, x):
mu = self.mu(x)
log_var = self.log_var(x)
return self.reparametrize(mu, log_var), mu, log_var
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
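# Usage sketch (illustrative): the layer returns the sample and its parameters.
#     layer = GaussianSample(4, 4)
#     z, mu, log_var = layer(torch.rand(4, 4, 4, 4))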
| import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_addcmul_exp_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tl_math.exp(tmp3)
tmp5 = 1.0
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp9 = tmp0 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = torch.ops.aten.randn.default([4, 4, 4, 4], device=device(
type='cuda', index=0), pin_memory=False)
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_addcmul_exp_mul_0[grid(256)](buf0, buf1, buf3,
buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf4, reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3
class Stochastic(nn.Module):
"""
Base stochastic layer that uses the
reparametrization trick [Kingma 2013]
to draw a sample from a distribution
parametrised by mu and log_var.
"""
def reparametrize(self, mu, logvar):
epsilon = torch.randn(mu.size(), requires_grad=False, device=mu.device)
std = logvar.mul(0.5).exp_()
z = mu.addcmul(std, epsilon)
return z
class GaussianSampleNew(Stochastic):
"""
Layer that represents a sample from a
Gaussian distribution.
"""
def __init__(self, in_features, out_features):
super(GaussianSampleNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.mu = nn.Linear(in_features, out_features)
self.log_var = nn.Linear(in_features, out_features)
def forward(self, input_0):
primals_1 = self.mu.weight
primals_2 = self.mu.bias
primals_4 = self.log_var.weight
primals_5 = self.log_var.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2]
| ericli0419/SCALEX | GaussianSample | false | 6,656 | [
"MIT"
] | 1 | 2fedbe4c3287cf86de7b786c98122fe45707416e | https://github.com/ericli0419/SCALEX/tree/2fedbe4c3287cf86de7b786c98122fe45707416e | import torch
import torch.nn as nn
class Stochastic(nn.Module):
"""
Base stochastic layer that uses the
reparametrization trick [Kingma 2013]
to draw a sample from a distribution
parametrised by mu and log_var.
"""
def reparametrize(self, mu, logvar):
epsilon = torch.randn(mu.size(), requires_grad=False, device=mu.device)
std = logvar.mul(0.5).exp_()
z = mu.addcmul(std, epsilon)
return z
class Model(Stochastic):
"""
Layer that represents a sample from a
Gaussian distribution.
"""
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.mu = nn.Linear(in_features, out_features)
self.log_var = nn.Linear(in_features, out_features)
def forward(self, x):
mu = self.mu(x)
log_var = self.log_var(x)
return self.reparametrize(mu, log_var), mu, log_var
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
PatchedSelfAttentionLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/by/cbyavqfpm6cctsjn76scubidnajec26whr355vus7lq6jaaa5rcx.py
# Topologically Sorted Source Nodes: [Q], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# Q => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_2,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
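# Note (a sketch of what the kernel above computes): it fuses the bias add, the
# ReLU, and the backward-pass threshold mask in one pass — in eager terms roughly
#     out = torch.relu(x + bias); mask = out <= 0
# where mask is the bool buffer autograd later uses to zero upstream gradients.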
# kernel path: runs/run_shard_4/inductor_cache/gv/cgvn7b6uvga2ixqkzhintkir5fwghmwo5y3grgysll6ab55q4qmp.py
# Topologically Sorted Source Nodes: [attention_mat_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_mat_1 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
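# The two softmax kernels split a numerically stable, scaled softmax: this one
# computes exp((x - rowmax) * 0.5) (the 0.5 folds in the 1/sqrt(elem_size) = 1/2
# scaling for elem_size=4), and triton_poi_fused__softmax_2 below divides by the
# row sum. A minimal eager sketch (illustrative):
def _reference_scaled_softmax(scores, scale=2.0):
    shifted = (scores - scores.amax(dim=-1, keepdim=True)) / scale
    e = shifted.exp()
    return e / e.sum(dim=-1, keepdim=True)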
# kernel path: runs/run_shard_4/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py
# Topologically Sorted Source Nodes: [attention_mat_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_mat_1 => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/q2/cq26fybzn7g3la3efd7ehhsrxi2lr7ymr4nvyqdxrrt6bjjoxks4.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view, %view_10), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_out_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf0 # reuse
buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [Q], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf2, primals_3, buf10, 64, grid=grid(64), stream=stream0)
del primals_3
buf3 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf1 # reuse
buf9 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [K], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf9, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_mat_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [attention_mat_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [new_values], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), primals_1, out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 1, 4, 4), (16, 1, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf8, primals_1, 64, grid=grid(64), stream=stream0)
return (reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), primals_1, buf6, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), buf9, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.utils.data
import torch
from torch import nn
import torch.nn.functional as F
class SelfAttentionLayer(nn.Module):
def __init__(self, elem_size, embd_size):
super(SelfAttentionLayer, self).__init__()
self.embd_size = embd_size
self.query_lin = nn.Linear(elem_size, embd_size)
self.key_lin = nn.Linear(elem_size, embd_size)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
if len(x.shape) == 3:
x = x.unsqueeze(1)
_N, _num_patches, _seq_size, elem_size = x.shape
Q = F.relu(self.query_lin(x))
K = F.relu(self.key_lin(x))
attention_mat = torch.matmul(Q, K.permute(0, 1, 3, 2)) / math.sqrt(
elem_size)
attention_mat = F.softmax(attention_mat, dim=-1)
new_values = torch.matmul(attention_mat, x)
out = new_values
out = x + out
return out, attention_mat
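# Shape sketch for the forward above with the (4, 4, 4) input from get_inputs
# (unsqueezed to N=4, patches=1, seq=4, elem=4): Q and K are (4, 1, 4, 4);
# Q @ K^T / sqrt(4) gives (4, 1, 4, 4) scores, softmaxed over the last dim;
# attention_mat @ x stays (4, 1, 4, 4) and is added back to x as a residual.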
class PatchedSelfAttentionLayer(nn.Module):
def __init__(self, elem_size, embd_size, window_size, use_V=False):
super(PatchedSelfAttentionLayer, self).__init__()
self.sa_layer = SelfAttentionLayer(elem_size, embd_size)
self.window_size = window_size
self.embd_size = embd_size
def forward(self, x):
N, seq_size, elem_size = x.shape
patches_num = seq_size // self.window_size
add_patch = False
if seq_size % self.window_size != 0:
add_patch = True
x_patches = x[:, :patches_num * self.window_size, :].reshape(N,
patches_num, self.window_size, elem_size)
if add_patch:
            rest_seq_padding = torch.zeros(N, 1, x_patches.shape[2],
                x_patches.shape[3], device=x.device, dtype=x.dtype)
rest_seq_values = x[:, patches_num * self.window_size:, :]
rest_seq_padding[:, 0, :rest_seq_values.shape[1], :
] = rest_seq_values
x_patches = torch.cat([x_patches, rest_seq_padding], dim=1)
x_patches, attention_mat = self.sa_layer(x_patches)
out = x_patches.reshape(x_patches.shape[0], x_patches.shape[1] *
x_patches.shape[2], x_patches.shape[3])[:, :seq_size, :]
attention_mat = attention_mat.reshape(attention_mat.shape[0],
attention_mat.shape[1] * attention_mat.shape[2], attention_mat.
shape[3])[:, :seq_size, :self.window_size]
return out, attention_mat
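# Patching sketch: the sequence is split into window_size-long patches before
# self-attention; when seq_size % window_size != 0 a zero-padded patch is
# appended, and both the output and the attention matrix are trimmed back to
# seq_size afterwards. With get_inputs' (4, 4, 4) input and window_size=4 this
# is a single (4, 1, 4, 4) patch and no padding.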
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'elem_size': 4, 'embd_size': 4, 'window_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.utils.data
import torch
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf0
buf10 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf2,
primals_3, buf10, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf3 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf1
buf9 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf3,
primals_5, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1),
0), primals_1, out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 1, 4, 4), (16, 1, 4, 1), 0)
del buf7
triton_poi_fused_add_3[grid(64)](buf8, primals_1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0
), primals_1, buf6, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), buf9, buf10
class SelfAttentionLayer(nn.Module):
def __init__(self, elem_size, embd_size):
super(SelfAttentionLayer, self).__init__()
self.embd_size = embd_size
self.query_lin = nn.Linear(elem_size, embd_size)
self.key_lin = nn.Linear(elem_size, embd_size)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
if len(x.shape) == 3:
x = x.unsqueeze(1)
_N, _num_patches, _seq_size, elem_size = x.shape
Q = F.relu(self.query_lin(x))
K = F.relu(self.key_lin(x))
attention_mat = torch.matmul(Q, K.permute(0, 1, 3, 2)) / math.sqrt(
elem_size)
attention_mat = F.softmax(attention_mat, dim=-1)
new_values = torch.matmul(attention_mat, x)
out = new_values
out = x + out
return out, attention_mat
class PatchedSelfAttentionLayerNew(nn.Module):
def __init__(self, elem_size, embd_size, window_size, use_V=False):
super(PatchedSelfAttentionLayerNew, self).__init__()
self.sa_layer = SelfAttentionLayer(elem_size, embd_size)
self.window_size = window_size
self.embd_size = embd_size
def forward(self, input_0):
primals_2 = self.sa_layer.query_lin.weight
primals_3 = self.sa_layer.query_lin.bias
primals_4 = self.sa_layer.key_lin.weight
primals_5 = self.sa_layer.key_lin.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| eldadp100/The-Mesh-Transformer | PatchedSelfAttentionLayer | false | 6,657 | [
"MIT"
] | 1 | b3ab18f774251feff1093040dfdcf7b836a43505 | https://github.com/eldadp100/The-Mesh-Transformer/tree/b3ab18f774251feff1093040dfdcf7b836a43505 | import math
import torch
import torch.utils.data
import torch
from torch import nn
import torch.nn.functional as F
class SelfAttentionLayer(nn.Module):
def __init__(self, elem_size, embd_size):
super().__init__()
self.embd_size = embd_size
self.query_lin = nn.Linear(elem_size, embd_size)
self.key_lin = nn.Linear(elem_size, embd_size)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
if len(x.shape) == 3:
x = x.unsqueeze(1)
_N, _num_patches, _seq_size, elem_size = x.shape
Q = F.relu(self.query_lin(x))
K = F.relu(self.key_lin(x))
attention_mat = torch.matmul(Q, K.permute(0, 1, 3, 2)) / math.sqrt(
elem_size)
attention_mat = F.softmax(attention_mat, dim=-1)
new_values = torch.matmul(attention_mat, x)
out = new_values
out = x + out
return out, attention_mat
class Model(nn.Module):
def __init__(self, elem_size, embd_size, window_size, use_V=False):
super().__init__()
self.sa_layer = SelfAttentionLayer(elem_size, embd_size)
self.window_size = window_size
self.embd_size = embd_size
def forward(self, x):
N, seq_size, elem_size = x.shape
patches_num = seq_size // self.window_size
add_patch = False
if seq_size % self.window_size != 0:
add_patch = True
x_patches = x[:, :patches_num * self.window_size, :].reshape(N,
patches_num, self.window_size, elem_size)
if add_patch:
            rest_seq_padding = torch.zeros(N, 1, x_patches.shape[2],
                x_patches.shape[3], device=x.device, dtype=x.dtype)
rest_seq_values = x[:, patches_num * self.window_size:, :]
rest_seq_padding[:, 0, :rest_seq_values.shape[1], :
] = rest_seq_values
x_patches = torch.cat([x_patches, rest_seq_padding], dim=1)
x_patches, attention_mat = self.sa_layer(x_patches)
out = x_patches.reshape(x_patches.shape[0], x_patches.shape[1] *
x_patches.shape[2], x_patches.shape[3])[:, :seq_size, :]
attention_mat = attention_mat.reshape(attention_mat.shape[0],
attention_mat.shape[1] * attention_mat.shape[2], attention_mat.
shape[3])[:, :seq_size, :self.window_size]
return out, attention_mat
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
SpatialAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yy/cyysmxfuvql7zq5273cqqzlv2qec35n7an4xo5tvwvoop7ml7mpp.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %getitem], 1), kwargs = {})
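# In plain terms: for each spatial position this kernel writes channel 0 as the
# mean of the two input channels and channel 1 as their elementwise maximum,
# fusing the torch.mean / torch.max / torch.cat sequence from forward() into one pass.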
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 64) % 2
x0 = xindex % 64
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (128*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (64 + x0 + (128*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = 2.0
tmp9 = tmp7 / tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 2, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr0 + (x0 + (128*x2)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr0 + (64 + x0 + (128*x2)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp12, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp11, tmp19)
tl.store(out_ptr0 + (x3), tmp20, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2h/c2h7x7vqvl2ldlya5jpbhvac4kxygymipwfzvir2qf4zuwykft5n.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 64), (128, 64, 1))
assert_size_stride(primals_2, (1, 2, 7), (14, 7, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 64), (128, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 64), (64, 64, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_2, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 2, 64), (128, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 2, 7), (14, 7, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv1d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
def get_inputs():
return [torch.rand([4, 2, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64 % 2
x0 = xindex % 64
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 128 * x2), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (64 + x0 + 128 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = 2.0
tmp9 = tmp7 / tmp8
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp15 = tl.load(in_ptr0 + (x0 + 128 * x2), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr0 + (64 + x0 + 128 * x2), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp12, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp11, tmp19)
tl.store(out_ptr0 + x3, tmp20, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 64), (128, 64, 1))
assert_size_stride(primals_2, (1, 2, 7), (14, 7, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 64), (128, 64, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, buf0, 512, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(3,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 64), (64, 64, 1))
buf2 = buf1
del buf1
triton_poi_fused_sigmoid_1[grid(256)](buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf2, primals_2, buf0, buf2
class SpatialAttentionNew(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttentionNew, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv1d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| esbgkannan/GT-CNN | SpatialAttention | false | 6,658 | [
"MIT"
] | 1 | 4f3828d7ed8f6c3ed796fa4e2e166ef5c16cb3d9 | https://github.com/esbgkannan/GT-CNN/tree/4f3828d7ed8f6c3ed796fa4e2e166ef5c16cb3d9 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, kernel_size=7):
super().__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv1d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
def get_inputs():
return [torch.rand([4, 2, 64])]
def get_init_inputs():
return []
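# A minimal usage sketch (input shape taken from get_inputs above): the module
# returns a (N, 1, L) gate in [0, 1] that is typically multiplied back onto
# the (N, C, L) feature map.
if __name__ == "__main__":
    sa = Model()
    x = torch.rand(4, 2, 64)
    gate = sa(x)
    assert gate.shape == (4, 1, 64)
    refined = x * gate  # broadcasts the gate over the channel dimension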
|
MyLinear | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6y/c6yevfu5wififyyachaj7ayocvlavstnke4dkrounjkln2io7i2q.py
# Topologically Sorted Source Nodes: [linear, relu], Original ATen: [aten.add, aten.relu]
# Source node to ATen node mapping:
# linear => add
# relu => relu
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %arg2_1), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
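# In plain terms: the matmul itself runs as an extern mm kernel below, and this
# pointwise kernel fuses the remaining work, out = relu(mm_result + bias).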
triton_poi_fused_add_relu_0 = async_compile.triton('triton_poi_fused_add_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(arg1_1, (64, 4), (4, 1), 0), arg0_1, out=buf0)
del arg0_1
del arg1_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [linear, relu], Original ATen: [aten.add, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_add_relu_0.run(buf1, arg2_1, 256, grid=grid(256), stream=stream0)
del arg2_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
class MyLinear(nn.Module):
def __init__(self, in_units, units):
super().__init__()
self.weight = nn.Parameter(torch.randn(in_units, units))
self.bias = nn.Parameter(torch.randn(units))
def forward(self, X):
linear = torch.matmul(X, self.weight.data) + self.bias.data
return F.relu(linear)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_units': 4, 'units': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(arg1_1, (64, 4), (4, 1), 0),
arg0_1, out=buf0)
del arg0_1
del arg1_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_relu_0[grid(256)](buf1, arg2_1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg2_1
return buf1,
class MyLinearNew(nn.Module):
def __init__(self, in_units, units):
super().__init__()
self.weight = nn.Parameter(torch.randn(in_units, units))
self.bias = nn.Parameter(torch.randn(units))
def forward(self, input_0):
arg0_1 = self.weight
arg2_1 = self.bias
arg1_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| eunice012716/Intern-Training | MyLinear | false | 6,659 | [
"MIT"
] | 1 | c3bbf42448a0b41e96d88569b6cfd57d78338716 | https://github.com/eunice012716/Intern-Training/tree/c3bbf42448a0b41e96d88569b6cfd57d78338716 | import torch
from torch import nn
from torch.nn import functional as F
class Model(nn.Module):
def __init__(self, in_units, units):
super().__init__()
self.weight = nn.Parameter(torch.randn(in_units, units))
self.bias = nn.Parameter(torch.randn(units))
def forward(self, X):
linear = torch.matmul(X, self.weight.data) + self.bias.data
return F.relu(linear)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
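# Note: forward() multiplies by self.weight.data / self.bias.data, which
# detaches the parameters from autograd, so no gradient reaches them.
# A quick illustrative check (shapes from get_init_inputs above):
if __name__ == "__main__":
    m = Model(4, 4)
    x = torch.rand(4, 4, 4, 4, requires_grad=True)
    m(x).sum().backward()         # gradient flows into x ...
    assert m.weight.grad is None  # ... but not into the detached parameters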
|
BCE_Dice | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/gh/cghhaxuvivm34ud4npjhjyz4a3cmd4xigw4ge46wba623vwavfqe.py
# Topologically Sorted Source Nodes: [prob, mul, intersection, mul_1, add_1, add, union, add_2, iou, loss, bce, add_3], Original ATen: [aten.sigmoid, aten.mul, aten.sum, aten.add, aten.div, aten.rsub, aten.binary_cross_entropy_with_logits]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# bce => abs_1, exp, full_default, log1p, mean, minimum, mul_2, neg, sub_1, sub_2, sub_3
# intersection => sum_1
# iou => div
# loss => sub
# mul => mul
# mul_1 => mul_1
# prob => sigmoid
# union => sum_2
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %arg1_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, %arg1_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %sub_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %mean), kwargs = {})
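# In plain terms this single persistent-reduction kernel evaluates, over all
# 256 elements and with smooth = 1:
#   loss = (1 - (2*sum(sigmoid(x)*y) + 1) / (sum(sigmoid(x) + y) + 1))
#          + binary_cross_entropy_with_logits(x, y)
# i.e. DiceLoss and nn.BCEWithLogitsLoss fused into one pass.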
triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sigmoid_sum_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sigmoid_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sigmoid_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp1 + tmp2
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 1.0
tmp12 = tmp11 - tmp2
tmp13 = tmp12 * tmp0
tmp14 = 0.0
tmp15 = triton_helpers.minimum(tmp14, tmp0)
tmp16 = tl_math.abs(tmp0)
tmp17 = -tmp16
tmp18 = tl_math.exp(tmp17)
tmp19 = libdevice.log1p(tmp18)
tmp20 = tmp15 - tmp19
tmp21 = tmp13 - tmp20
tmp22 = tl.broadcast_to(tmp21, [RBLOCK])
tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0))
tmp25 = 2.0
tmp26 = tmp6 * tmp25
tmp27 = tmp26 + tmp11
tmp28 = tmp10 + tmp11
tmp29 = tmp27 / tmp28
tmp30 = tmp11 - tmp29
tmp31 = 256.0
tmp32 = tmp24 / tmp31
tmp33 = tmp30 + tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp33, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [prob, mul, intersection, mul_1, add_1, add, union, add_2, iou, loss, bce, add_3], Original ATen: [aten.sigmoid, aten.mul, aten.sum, aten.add, aten.div, aten.rsub, aten.binary_cross_entropy_with_logits]
stream0 = get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sigmoid_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def IoU(logit, truth, smooth=1):
prob = torch.sigmoid(logit)
intersection = torch.sum(prob * truth)
union = torch.sum(prob + truth)
iou = (2 * intersection + smooth) / (union + smooth)
return iou
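# Note: despite its name, this computes the soft Dice coefficient
# 2*sum(p*t) / (sum(p) + sum(t)) rather than intersection-over-union. Worked
# example with smooth=1: if prob == truth == 1 over n elements, intersection
# = n and union = 2n, so the score is (2n + 1) / (2n + 1) = 1.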
class DiceLoss(nn.Module):
def __init__(self, smooth=1):
super(DiceLoss, self).__init__()
self.smooth = smooth
def forward(self, logit, truth):
iou = IoU(logit, truth, self.smooth)
loss = 1 - iou
return loss
class BCE_Dice(nn.Module):
def __init__(self, smooth=1):
super(BCE_Dice, self).__init__()
self.smooth = smooth
self.dice = DiceLoss(smooth=smooth)
self.bce = nn.BCEWithLogitsLoss()
def forward(self, logit, truth):
dice = self.dice(logit, truth)
bce = self.bce(logit, truth)
return dice + bce
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sigmoid_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp1 + tmp2
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 1.0
tmp12 = tmp11 - tmp2
tmp13 = tmp12 * tmp0
tmp14 = 0.0
tmp15 = triton_helpers.minimum(tmp14, tmp0)
tmp16 = tl_math.abs(tmp0)
tmp17 = -tmp16
tmp18 = tl_math.exp(tmp17)
tmp19 = libdevice.log1p(tmp18)
tmp20 = tmp15 - tmp19
tmp21 = tmp13 - tmp20
tmp22 = tl.broadcast_to(tmp21, [RBLOCK])
tmp24 = triton_helpers.promote_to_tensor(tl.sum(tmp22, 0))
tmp25 = 2.0
tmp26 = tmp6 * tmp25
tmp27 = tmp26 + tmp11
tmp28 = tmp10 + tmp11
tmp29 = tmp27 / tmp28
tmp30 = tmp11 - tmp29
tmp31 = 256.0
tmp32 = tmp24 / tmp31
tmp33 = tmp30 + tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp33, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_div_mul_rsub_sigmoid_sum_0[
grid(1)](buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
def IoU(logit, truth, smooth=1):
prob = torch.sigmoid(logit)
intersection = torch.sum(prob * truth)
union = torch.sum(prob + truth)
iou = (2 * intersection + smooth) / (union + smooth)
return iou
class DiceLoss(nn.Module):
def __init__(self, smooth=1):
super(DiceLoss, self).__init__()
self.smooth = smooth
def forward(self, logit, truth):
iou = IoU(logit, truth, self.smooth)
loss = 1 - iou
return loss
class BCE_DiceNew(nn.Module):
def __init__(self, smooth=1):
super(BCE_DiceNew, self).__init__()
self.smooth = smooth
self.dice = DiceLoss(smooth=smooth)
self.bce = nn.BCEWithLogitsLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| evilidol/kaggle-Steel-Defect-Detection | BCE_Dice | false | 6,660 | [
"MIT"
] | 1 | 41e3e360f49d706c8c79bcd442342c529648a736 | https://github.com/evilidol/kaggle-Steel-Defect-Detection/tree/41e3e360f49d706c8c79bcd442342c529648a736 | import torch
import torch.nn as nn
def IoU(logit, truth, smooth=1):
prob = torch.sigmoid(logit)
intersection = torch.sum(prob * truth)
union = torch.sum(prob + truth)
iou = (2 * intersection + smooth) / (union + smooth)
return iou
class DiceLoss(nn.Module):
def __init__(self, smooth=1):
super().__init__()
self.smooth = smooth
def forward(self, logit, truth):
iou = IoU(logit, truth, self.smooth)
loss = 1 - iou
return loss
class Model(nn.Module):
def __init__(self, smooth=1):
super().__init__()
self.smooth = smooth
self.dice = DiceLoss(smooth=smooth)
self.bce = nn.BCEWithLogitsLoss()
def forward(self, logit, truth):
dice = self.dice(logit, truth)
bce = self.bce(logit, truth)
return dice + bce
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
PrimaryCaps | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ik/cikwp3af5pjsypo2wrobdh3ptoj7qfddlg24vmarkhoiio3wrbgn.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (131072*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/e3/ce372j34spt6n6c73ychhfy2m6umubchn3rihx4ayqwhd2kzp33y.py
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# a => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2t/c2tugev72iiutoyluzyegaw24yp42ecqfaf34heczmmn66qg4w7y.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution, %sigmoid], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8912896
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 544
x0 = xindex % 4096
x2 = (xindex // 2228224)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((512*x0) + (2097152*x2) + x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 544, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + ((32*x0) + (131072*x2) + ((-512) + x1)), tmp10, eviction_policy='evict_last', other=0.0)
tmp14 = tl.sigmoid(tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp10, tmp14, tmp15)
tmp17 = tl.where(tmp4, tmp9, tmp16)
tl.store(out_ptr0 + (x3), tmp17, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (512, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_2, (512, ), (1, ))
assert_size_stride(primals_3, (4, 32, 64, 64), (131072, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_5, (32, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 32, 64, 64), (131072, 1, 2048, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 128, 4096, grid=grid(128, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [p], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 512, 64, 64), (2097152, 1, 32768, 512))
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 1, 2048, 32))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [a], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 524288, grid=grid(524288), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 544, 64, 64), (2228224, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf1, primals_2, buf3, buf4, 8912896, grid=grid(8912896), stream=stream0)
del buf1
del primals_2
return (reinterpret_tensor(buf4, (4, 64, 64, 544), (2228224, 64, 1, 4096), 0), primals_1, buf0, primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((512, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 32, 64, 64), (131072, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PrimaryCaps(nn.Module):
"""Creates a primary convolutional capsule layer
that outputs a pose matrix and an activation.
    Note that for computational convenience, the pose matrices are stored
    in the first part while the activations are stored in the second part.
    Args:
        A: number of channels output by the preceding conv layer
        B: number of types of capsules
        K: kernel size of convolution
        P: size of pose matrix is P*P
        stride: stride of convolution
    Shape:
        input: (*, A, h, w)
        output: (*, h', w', B*(P*P+1))
        h', w' are computed the same way as for a convolution layer
        parameter size is: K*K*A*B*P*P + B*P*P
"""
def __init__(self, A=32, B=32, K=1, P=4, stride=1):
super(PrimaryCaps, self).__init__()
self.pose = nn.Conv2d(in_channels=A, out_channels=B * P * P,
kernel_size=K, stride=stride, bias=True)
self.a = nn.Conv2d(in_channels=A, out_channels=B, kernel_size=K,
stride=stride, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
p = self.pose(x)
a = self.a(x)
a = self.sigmoid(a)
out = torch.cat([p, a], dim=1)
out = out.permute(0, 2, 3, 1)
return out
def get_inputs():
return [torch.rand([4, 32, 64, 64])]
def get_init_inputs():
return [[], {}]
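# Shape arithmetic for the defaults (A=32, B=32, K=1, P=4): the output carries
# B*(P*P + 1) = 32 * 17 = 544 channels (512 pose + 32 activation), so the
# (4, 32, 64, 64) input above becomes (4, 64, 64, 544) after the final permute.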
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 131072 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 544
x0 = xindex % 4096
x2 = xindex // 2228224
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (512 * x0 + 2097152 * x2 + x1), tmp4,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 544, tl.int64)
tmp13 = tl.load(in_ptr2 + (32 * x0 + 131072 * x2 + (-512 + x1)), tmp10,
eviction_policy='evict_last', other=0.0)
tmp14 = tl.sigmoid(tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp10, tmp14, tmp15)
tmp17 = tl.where(tmp4, tmp9, tmp16)
tl.store(out_ptr0 + x3, tmp17, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (512, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_2, (512,), (1,))
assert_size_stride(primals_3, (4, 32, 64, 64), (131072, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_5, (32,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 32, 64, 64), (131072, 1, 2048, 32),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(128, 4096)](primals_3, buf0, 128, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 512, 64, 64), (2097152, 1, 32768, 512))
buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 1, 2048, 32))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(524288)](buf3, primals_5,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 544, 64, 64), (2228224, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_2[grid(8912896)](buf1, primals_2, buf3, buf4,
8912896, XBLOCK=512, num_warps=8, num_stages=1)
del buf1
del primals_2
return reinterpret_tensor(buf4, (4, 64, 64, 544), (2228224, 64, 1, 4096), 0
), primals_1, buf0, primals_4, buf3
class PrimaryCapsNew(nn.Module):
"""Creates a primary convolutional capsule layer
that outputs a pose matrix and an activation.
    Note that for computational convenience, the pose matrices are stored
    in the first part while the activations are stored in the second part.
    Args:
        A: number of channels output by the preceding conv layer
        B: number of types of capsules
        K: kernel size of convolution
        P: size of pose matrix is P*P
        stride: stride of convolution
    Shape:
        input: (*, A, h, w)
        output: (*, h', w', B*(P*P+1))
        h', w' are computed the same way as for a convolution layer
        parameter size is: K*K*A*B*P*P + B*P*P
"""
def __init__(self, A=32, B=32, K=1, P=4, stride=1):
super(PrimaryCapsNew, self).__init__()
self.pose = nn.Conv2d(in_channels=A, out_channels=B * P * P,
kernel_size=K, stride=stride, bias=True)
self.a = nn.Conv2d(in_channels=A, out_channels=B, kernel_size=K,
stride=stride, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.pose.weight
primals_2 = self.pose.bias
primals_4 = self.a.weight
primals_5 = self.a.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| esdrascosta/Matrix-Capsules | PrimaryCaps | false | 6,661 | [
"MIT"
] | 1 | ddf35dfa1acfb51a11a3ec27e15fe863a2ff6fa4 | https://github.com/esdrascosta/Matrix-Capsules/tree/ddf35dfa1acfb51a11a3ec27e15fe863a2ff6fa4 | import torch
import torch.nn as nn
class Model(nn.Module):
"""Creates a primary convolutional capsule layer
that outputs a pose matrix and an activation.
Note that for computation convenience, pose matrix
are stored in first part while the activations are
stored in the second part.
Args:
A: output of the normal conv layer
B: number of types of capsules
K: kernel size of convolution
P: size of pose matrix is P*P
stride: stride of convolution
Shape:
input: (*, A, h, w)
output: (*, h', w', B*(P*P+1))
h', w' is computed the same way as convolution layer
parameter size is: K*K*A*B*P*P + B*P*P
"""
def __init__(self, A=32, B=32, K=1, P=4, stride=1):
super().__init__()
self.pose = nn.Conv2d(in_channels=A, out_channels=B * P * P,
kernel_size=K, stride=stride, bias=True)
self.a = nn.Conv2d(in_channels=A, out_channels=B, kernel_size=K,
stride=stride, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
p = self.pose(x)
a = self.a(x)
a = self.sigmoid(a)
out = torch.cat([p, a], dim=1)
out = out.permute(0, 2, 3, 1)
return out
def get_inputs():
return [torch.rand([4, 32, 64, 64])]
def get_init_inputs():
return []
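# Parameter-size check against the docstring formula: pose.weight holds
# K*K*A*B*P*P = 1*1*32*32*16 = 16384 values and pose.bias holds
# B*P*P = 512, matching the (512, 32, 1, 1) and (512,) tensors asserted
# in the compiled call() earlier in this entry.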
|
Highway | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/mx/cmxbsuduhrb3re3kyyjzil6fiub7wk6y3jw7tjv4rnhxateuwfne.py
# Topologically Sorted Source Nodes: [x_proj, x_gate, mul, sub, mul_1, x_highway], Original ATen: [aten.relu, aten.sigmoid, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# sub => sub
# x_gate => sigmoid
# x_highway => add
# x_proj => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %relu), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
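# In plain terms the fused kernel computes the highway update
#   y = g * relu(x @ W_proj.T + b_proj) + (1 - g) * x
# with gate g = sigmoid(x @ W_gate.T + b_gate), applied elementwise.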
triton_poi_fused_add_mul_relu_rsub_sigmoid_0 = async_compile.triton('triton_poi_fused_add_mul_relu_rsub_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_rsub_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp8 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_proj, x_gate, mul, sub, mul_1, x_highway], Original ATen: [aten.relu, aten.sigmoid, aten.mul, aten.rsub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_0.run(buf1, buf0, primals_3, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_3, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils
import torch.onnx
class Highway(nn.Module):
def __init__(self, e_word):
super(Highway, self).__init__()
self.embed_size = e_word
self.w_proj = nn.Linear(self.embed_size, self.embed_size, bias=True)
self.w_gate = nn.Linear(self.embed_size, self.embed_size, bias=True)
def forward(self, x_convout: 'torch.Tensor') ->torch.Tensor:
x_proj = F.relu(self.w_proj(x_convout))
sig = nn.Sigmoid()
x_gate = sig(self.w_gate(x_convout))
x_highway = x_gate * x_proj + (1 - x_gate) * x_convout
return x_highway
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'e_word': 4}]
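# A tiny behavioural check of the highway gate above, as a runnable sketch:
# a saturated gate of 1 passes only the projected path, while a gate of 0
# passes the input through unchanged (a pure skip connection).
def _check_highway_gate_limits():
    proj = torch.randn(2, 4)
    x = torch.randn(2, 4)
    ones = torch.ones(2, 4)
    zeros = torch.zeros(2, 4)
    assert torch.equal(ones * proj + (1 - ones) * x, proj)
    assert torch.equal(zeros * proj + (1 - zeros) * x, x)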
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.utils
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp8 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tmp1 * tmp4
tmp6 = 1.0
tmp7 = tmp6 - tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp5 + tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
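# Eager reference for the fused kernel above, as a sketch (not part of the
# generated module): in_ptr0 holds the gate logits, in_ptr1 the pre-ReLU
# projection, in_ptr2 the original input, so the kernel computes
# sigmoid(gate) * relu(proj) + (1 - sigmoid(gate)) * x elementwise.
def _highway_fused_reference(gate, proj, x):
    g = torch.sigmoid(gate)
    return g * torch.relu(proj) + (1.0 - g) * x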
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_relu_rsub_sigmoid_0[grid(256)](buf1, buf0,
primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_3, buf0, buf1
class HighwayNew(nn.Module):
def __init__(self, e_word):
super(HighwayNew, self).__init__()
self.embed_size = e_word
self.w_proj = nn.Linear(self.embed_size, self.embed_size, bias=True)
self.w_gate = nn.Linear(self.embed_size, self.embed_size, bias=True)
def forward(self, input_0):
primals_1 = self.w_proj.weight
primals_2 = self.w_proj.bias
primals_4 = self.w_gate.weight
primals_5 = self.w_gate.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| evazhang612/honygenerator | Highway | false | 6,662 | [
"MIT"
] | 1 | cafcf1736faba978ecaed624b949ebc1498477ee | https://github.com/evazhang612/honygenerator/tree/cafcf1736faba978ecaed624b949ebc1498477ee | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils
import torch.onnx
class Model(nn.Module):
def __init__(self, e_word):
super().__init__()
self.embed_size = e_word
self.w_proj = nn.Linear(self.embed_size, self.embed_size, bias=True)
self.w_gate = nn.Linear(self.embed_size, self.embed_size, bias=True)
def forward(self, x_convout: 'torch.Tensor') ->torch.Tensor:
x_proj = F.relu(self.w_proj(x_convout))
sig = nn.Sigmoid()
x_gate = sig(self.w_gate(x_convout))
x_highway = x_gate * x_proj + (1 - x_gate) * x_convout
return x_highway
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
P2SActivationLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/gx/cgxa3ynix245ic4ukdndmwpojaikvex2djgvdniumettuh76k6xs.py
# Topologically Sorted Source Nodes: [renorm, w], Original ATen: [aten.renorm, aten.mul]
# Source node to ATen node mapping:
# renorm => add, full_default, gt, mul, mul_1, pow_1, pow_2, reciprocal, sum_1, where
# w => mul_2
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [0], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%pow_2, 1e-05), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-07), kwargs = {})
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1e-05), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %full_default), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %where), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, 100000.0), kwargs = {})
triton_poi_fused_mul_renorm_0 = async_compile.triton('triton_poi_fused_mul_renorm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_renorm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_renorm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-05
tmp14 = tmp12 > tmp13
tmp15 = 1e-07
tmp16 = tmp12 + tmp15
tmp17 = tl.full([1], 1, tl.int32)
tmp18 = tmp17 / tmp16
tmp19 = tmp18 * tmp13
tmp20 = 1.0
tmp21 = tl.where(tmp14, tmp19, tmp20)
tmp22 = tmp0 * tmp21
tmp23 = 100000.0
tmp24 = tmp22 * tmp23
tl.store(out_ptr0 + (x2), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/q2/cq2ynu7pk3y34gqjnwjbvmojd33ymfglb7cwfysube6dasadwaya.py
# Topologically Sorted Source Nodes: [cos_theta, cos_theta_1], Original ATen: [aten.div, aten.clamp, aten.ge, aten.le, aten.logical_and]
# Source node to ATen node mapping:
# cos_theta => div
# cos_theta_1 => clamp_max, clamp_min
# Graph fragment:
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm, %view), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div, -1), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%div, -1), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%div, 1), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {})
triton_poi_fused_clamp_div_ge_le_logical_and_1 = async_compile.triton('triton_poi_fused_clamp_div_ge_le_logical_and_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_ge_le_logical_and_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_ge_le_logical_and_1(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tmp14 = -1.0
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = 1.0
tmp17 = triton_helpers.minimum(tmp15, tmp16)
tmp18 = tmp13 >= tmp14
tmp19 = tmp13 <= tmp16
tmp20 = tmp18 & tmp19
tl.store(out_ptr0 + (x2), tmp17, xmask)
tl.store(out_ptr1 + (x2), tmp20, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [renorm, w], Original ATen: [aten.renorm, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_renorm_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [renorm, w, inner_wx], Original ATen: [aten.renorm, aten.mul, aten.mm]
extern_kernels.mm(primals_2, buf0, out=buf1)
buf2 = buf1; del buf1 # reuse
buf3 = buf0; del buf0 # reuse
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [cos_theta, cos_theta_1], Original ATen: [aten.div, aten.clamp, aten.ge, aten.le, aten.logical_and]
triton_poi_fused_clamp_div_ge_le_logical_and_1.run(buf2, primals_2, buf3, buf4, 16, grid=grid(16), stream=stream0)
del buf2
return (buf3, primals_1, primals_2, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as torch_nn
from torch.nn import Parameter
import torch.utils
class P2SActivationLayer(torch_nn.Module):
""" Output layer that produces cos heta between activation vector x
and class vector w_j
in_dim: dimension of input feature vectors
output_dim: dimension of output feature vectors
(i.e., number of classes)
Usage example:
batchsize = 64
input_dim = 10
class_num = 5
l_layer = P2SActivationLayer(input_dim, class_num)
l_loss = P2SGradLoss()
data = torch.rand(batchsize, input_dim, requires_grad=True)
target = (torch.rand(batchsize) * class_num).clamp(0, class_num-1)
target = target.to(torch.long)
scores = l_layer(data)
loss = l_loss(scores, target)
loss.backward()
"""
def __init__(self, in_dim, out_dim):
super(P2SActivationLayer, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.weight = Parameter(torch.Tensor(in_dim, out_dim))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
return
def forward(self, input_feat):
"""
Compute P2SGrad activation
input:
------
input_feat: tensor (batchsize, input_dim)
output:
-------
tensor (batchsize, output_dim)
"""
w = self.weight.renorm(2, 1, 1e-05).mul(100000.0)
x_modulus = input_feat.pow(2).sum(1).pow(0.5)
w.pow(2).sum(0).pow(0.5)
inner_wx = input_feat.mm(w)
cos_theta = inner_wx / x_modulus.view(-1, 1)
cos_theta = cos_theta.clamp(-1, 1)
return cos_theta
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
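# Property-check sketch for the layer above. After renorm(2, 1, 1e-05)
# followed by mul(100000.0) each weight column almost surely has unit L2
# norm, so cos_theta is x.w_j / |x| and every score lies in [-1, 1]; note
# that the forward's w.pow(2).sum(0).pow(0.5) computes the column norms but
# discards the result.
def _check_p2s_cosine_range():
    layer = P2SActivationLayer(4, 4)
    scores = layer(torch.rand(8, 4))
    assert scores.abs().max() <= 1.0
    assert torch.allclose(layer.weight.norm(dim=0), torch.ones(4), atol=1e-4)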
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as torch_nn
from torch.nn import Parameter
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_renorm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-05
tmp14 = tmp12 > tmp13
tmp15 = 1e-07
tmp16 = tmp12 + tmp15
tmp17 = tl.full([1], 1, tl.int32)
tmp18 = tmp17 / tmp16
tmp19 = tmp18 * tmp13
tmp20 = 1.0
tmp21 = tl.where(tmp14, tmp19, tmp20)
tmp22 = tmp0 * tmp21
tmp23 = 100000.0
tmp24 = tmp22 * tmp23
tl.store(out_ptr0 + x2, tmp24, xmask)
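# Eager sketch of what the kernel above computes (assuming, as here, that
# every column norm exceeds the 1e-05 threshold so the renorm branch fires):
# each column is scaled by 1e-05 / (norm + 1e-07), bringing its norm to
# roughly 1e-05, then multiplied by 1e5, so the columns come out (near)
# unit-normalised.
def _renorm_mul_reference(w):
    norm = w.pow(2).sum(0, keepdim=True).sqrt()
    scale = torch.where(norm > 1e-05, 1e-05 / (norm + 1e-07),
        torch.ones_like(norm))
    return w * scale * 100000.0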
@triton.jit
def triton_poi_fused_clamp_div_ge_le_logical_and_1(in_out_ptr0, in_ptr0,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tmp14 = -1.0
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = 1.0
tmp17 = triton_helpers.minimum(tmp15, tmp16)
tmp18 = tmp13 >= tmp14
tmp19 = tmp13 <= tmp16
tmp20 = tmp18 & tmp19
tl.store(out_ptr0 + x2, tmp17, xmask)
tl.store(out_ptr1 + x2, tmp20, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_renorm_0[grid(16)](primals_1, buf0, 16, XBLOCK
=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, buf0, out=buf1)
buf2 = buf1
del buf1
buf3 = buf0
del buf0
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_clamp_div_ge_le_logical_and_1[grid(16)](buf2,
primals_2, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf2
return buf3, primals_1, primals_2, buf4
class P2SActivationLayerNew(torch_nn.Module):
""" Output layer that produces cos heta between activation vector x
and class vector w_j
in_dim: dimension of input feature vectors
output_dim: dimension of output feature vectors
(i.e., number of classes)
Usage example:
batchsize = 64
input_dim = 10
class_num = 5
l_layer = P2SActivationLayer(input_dim, class_num)
l_loss = P2SGradLoss()
data = torch.rand(batchsize, input_dim, requires_grad=True)
target = (torch.rand(batchsize) * class_num).clamp(0, class_num-1)
target = target.to(torch.long)
scores = l_layer(data)
loss = l_loss(scores, target)
loss.backward()
"""
def __init__(self, in_dim, out_dim):
super(P2SActivationLayerNew, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.weight = Parameter(torch.Tensor(in_dim, out_dim))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
return
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| eurecom-asp/raw-pc-darts-anti-spoofing | P2SActivationLayer | false | 6,663 | [
"MIT"
] | 1 | f2dcb5a8fc0cb811328a341a9bd90ffb292adaa1 | https://github.com/eurecom-asp/raw-pc-darts-anti-spoofing/tree/f2dcb5a8fc0cb811328a341a9bd90ffb292adaa1 | import torch
import torch.nn as torch_nn
from torch.nn import Parameter
import torch.utils
class Model(torch_nn.Module):
""" Output layer that produces cos heta between activation vector x
and class vector w_j
in_dim: dimension of input feature vectors
output_dim: dimension of output feature vectors
(i.e., number of classes)
Usage example:
batchsize = 64
input_dim = 10
class_num = 5
l_layer = P2SActivationLayer(input_dim, class_num)
l_loss = P2SGradLoss()
data = torch.rand(batchsize, input_dim, requires_grad=True)
target = (torch.rand(batchsize) * class_num).clamp(0, class_num-1)
target = target.to(torch.long)
scores = l_layer(data)
loss = l_loss(scores, target)
loss.backward()
"""
def __init__(self, in_dim, out_dim):
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.weight = Parameter(torch.Tensor(in_dim, out_dim))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)
return
def forward(self, input_feat):
"""
Compute P2SGrad activation
input:
------
input_feat: tensor (batchsize, input_dim)
output:
-------
tensor (batchsize, output_dim)
"""
w = self.weight.renorm(2, 1, 1e-05).mul(100000.0)
x_modulus = input_feat.pow(2).sum(1).pow(0.5)
w.pow(2).sum(0).pow(0.5)
inner_wx = input_feat.mm(w)
cos_theta = inner_wx / x_modulus.view(-1, 1)
cos_theta = cos_theta.clamp(-1, 1)
return cos_theta
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
ChannelGate2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3a/c3a2kvvniyeubzpxsitgrxifqubpbz6atwleemyozs2mnjp762to.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lp/clprvnh5p6cmadxtwzizwydrpjlwxohxixbw4ntucp6srbu6gtis.py
# Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# x_4 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (2, ), (1, ))
assert_size_stride(primals_4, (4, 2, 1, 1), (2, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 2, 1, 1), (2, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 8, grid=grid(8), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_3.run(primals_1, buf5, buf6, 256, grid=grid(256), stream=stream0)
return (buf6, primals_1, primals_2, primals_4, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 2, 1, 1), (2, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ChannelGate2d(nn.Module):
def __init__(self, channels, reduction=2):
super(ChannelGate2d, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,
padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,
padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
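# Behavioural sketch for the gate above: the squeeze-excite path yields one
# sigmoid scale per channel, so the output is the input rescaled
# channel-wise by a factor in (0, 1).
def _check_channel_gate_scales():
    gate = ChannelGate2d(4)
    x = torch.rand(2, 4, 4, 4)
    y = gate(x)
    scale = gate.sigmoid(gate.fc2(gate.relu(gate.fc1(gate.avg_pool(x)))))
    assert torch.allclose(y, x * scale)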
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
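# The reduction above is AdaptiveAvgPool2d(1) specialised to 4x4 feature
# maps: each (batch, channel) slot averages its 16 spatial elements. An
# eager sketch:
def _avg_pool_reference(x):
    return x.mean(dim=(-1, -2), keepdim=True)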
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (4, 2, 1, 1), (2, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 2, 1, 1), (2, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(8)](buf3, primals_3, 8,
XBLOCK=8, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(16)](buf5, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf5, buf6,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf6, primals_1, primals_2, primals_4, buf1, buf3, buf5
class ChannelGate2dNew(nn.Module):
def __init__(self, channels, reduction=2):
super(ChannelGate2dNew, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,
padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,
padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| evilidol/kaggle-Steel-Defect-Detection | ChannelGate2d | false | 6,664 | [
"MIT"
] | 1 | 41e3e360f49d706c8c79bcd442342c529648a736 | https://github.com/evilidol/kaggle-Steel-Defect-Detection/tree/41e3e360f49d706c8c79bcd442342c529648a736 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, channels, reduction=2):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,
padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,
padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
SelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/fz/cfzmg4qtw6jgry4nhlwopodzjz62ll3n3ykfox77hwd2crdnlh2w.py
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# att_1 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 0.5), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# att_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
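# The two kernels above form a numerically stable row softmax with the
# 1/sqrt(d_k) scale (0.5 for head size 4) folded in: the first emits
# exp(0.5 * (att - rowmax)), the second divides by the row sum. Eager
# reference, as a sketch:
def _scaled_softmax_reference(att):
    return torch.softmax(att * 0.5, dim=-1)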
# kernel path: runs/run_shard_4/inductor_cache/xp/cxp3ouwpdhdlmipppq44wjaey2obmthzec7uqoddmpoigfmupxdx.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
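# Note (added for readability): this clone kernel materializes the
# `y.transpose(1, 2).contiguous()` from the source module; the read index
# (y0 + 4*x2 + 16*y1) versus the write index (x2 + 4*y3) swaps the two inner
# strides so the buffer can be viewed as a dense (16, 4) matrix by the
# projection addmm below.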
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (48, 12, 1), 0), reinterpret_tensor(buf0, (4, 4, 4), (48, 1, 12), 4), out=buf1)
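        # buf0 holds the fused qkv projection, laid out row-major as (16, 12);
        # reinterpret_tensor offsets 0, 4 and 8 slice out q, k and v in place,
        # and the (48, 1, 12) stride on the second operand reads k transposed,
        # so this bmm computes q @ k^T without materializing the split tensors.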
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [att_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf0, (4, 4, 4), (48, 12, 1), 8), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf4, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
buf6 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_5
return (reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), buf3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf3, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf0, (4, 4, 4), (48, 1, 12), 8), reinterpret_tensor(buf0, (4, 4, 4), (48, 1, 12), 0), reinterpret_tensor(buf0, (4, 4, 4), (48, 12, 1), 4), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
class SelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
self.qkv = nn.Linear(config.n_embd, config.n_embd * 3)
self.split_size = config.n_embd
self.proj = nn.Linear(config.n_embd, config.n_embd)
self.register_buffer('mask', torch.tril(torch.ones(config.
block_size, config.block_size)).view(1, 1, config.block_size,
config.block_size))
self.n_head = config.n_head
def forward(self, x, attn_mask=None):
B, T, C = x.size()
q, k, v = self.qkv(x).split(self.split_size, 2)
att = q @ k.transpose(-2, -1) * (1.0 / math.sqrt(k.size(-1)))
if attn_mask is not None:
att = att + attn_mask
att = F.softmax(att, dim=-1)
y = att @ v
y = y.transpose(1, 2).contiguous().view(B, T, C)
y = self.proj(y)
return y, att
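# Note (added for readability): the registered causal `mask` buffer is never
# read in forward(); masking only happens through the optional `attn_mask`
# argument, and the compiled graph in this record was evidently traced with
# attn_mask=None, since call() receives no mask input.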
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(n_embd=4, n_head=4, block_size=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
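# A minimal eager-mode sketch (added for illustration; `_reference_scaled_softmax`
# is a hypothetical helper, not part of the generated output) of what the two
# softmax kernels compute together. Inductor folds the attention scale
# 1/sqrt(d_k) = 0.5 (d_k = 4 here) into the exp pass above: shifting by the
# row max before scaling is safe because the shift cancels between numerator
# and denominator, so exp(s*(x - m)) / sum(exp(s*(x - m))) == softmax(s * x).
def _reference_scaled_softmax(x, scale=0.5):
    # Numerically stable softmax of (scale * x) along the last dimension.
    num = ((x - x.amax(dim=-1, keepdim=True)) * scale).exp()
    return num / num.sum(dim=-1, keepdim=True)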
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (48, 12, 1),
0), reinterpret_tensor(buf0, (4, 4, 4), (48, 1, 12), 4), out=buf1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
extern_kernels.bmm(buf3, reinterpret_tensor(buf0, (4, 4, 4), (48,
12, 1), 8), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf4, buf5, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (16, 4), (4, 1), 0)
del buf4
extern_kernels.addmm(primals_5, reinterpret_tensor(buf5, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf6)
del primals_5
return reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0
), buf3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf3, reinterpret_tensor(buf5, (16, 4), (4, 1), 0
), primals_4, reinterpret_tensor(buf0, (4, 4, 4), (48, 1, 12), 8
), reinterpret_tensor(buf0, (4, 4, 4), (48, 1, 12), 0
), reinterpret_tensor(buf0, (4, 4, 4), (48, 12, 1), 4)
class SelfAttentionNew(nn.Module):
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
self.qkv = nn.Linear(config.n_embd, config.n_embd * 3)
self.split_size = config.n_embd
self.proj = nn.Linear(config.n_embd, config.n_embd)
self.register_buffer('mask', torch.tril(torch.ones(config.
block_size, config.block_size)).view(1, 1, config.block_size,
config.block_size))
self.n_head = config.n_head
def forward(self, input_0):
primals_2 = self.qkv.weight
primals_3 = self.qkv.bias
primals_4 = self.proj.weight
primals_5 = self.proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| evelynmitchell/rasp | SelfAttention | false | 6,665 | ["MIT"] | 1 | 9b33bbf911e6c4ff018c9883c39eb698c0abe803 | https://github.com/evelynmitchell/rasp/tree/9b33bbf911e6c4ff018c9883c39eb698c0abe803 | from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
class Model(nn.Module):
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
self.qkv = nn.Linear(config.n_embd, config.n_embd * 3)
self.split_size = config.n_embd
self.proj = nn.Linear(config.n_embd, config.n_embd)
self.register_buffer('mask', torch.tril(torch.ones(config.
block_size, config.block_size)).view(1, 1, config.block_size,
config.block_size))
self.n_head = config.n_head
def forward(self, x, attn_mask=None):
B, T, C = x.size()
q, k, v = self.qkv(x).split(self.split_size, 2)
att = q @ k.transpose(-2, -1) * (1.0 / math.sqrt(k.size(-1)))
if attn_mask is not None:
att = att + attn_mask
att = F.softmax(att, dim=-1)
y = att @ v
y = y.transpose(1, 2).contiguous().view(B, T, C)
y = self.proj(y)
return y, att
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return []
|
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pb/cpb4pgnntm3itt4owj4eounc4i6k7fwpujg7rpewg5fpoiciaxnt.py
# Topologically Sorted Source Nodes: [prob, mul, intersection, mul_1, add_1, add, union, add_2, iou, loss], Original ATen: [aten.sigmoid, aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# intersection => sum_1
# iou => div
# loss => sub
# mul => mul
# mul_1 => mul_1
# prob => sigmoid
# union => sum_2
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %arg1_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, %arg1_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sigmoid_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sigmoid_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sigmoid_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp1 + tmp2
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 2.0
tmp12 = tmp6 * tmp11
tmp13 = 1.0
tmp14 = tmp12 + tmp13
tmp15 = tmp10 + tmp13
tmp16 = tmp14 / tmp15
tmp17 = tmp13 - tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
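# Reading the kernel above (comment added for readability): tmp6 accumulates
# the intersection sum(sigmoid(logit) * truth) and tmp10 the union term
# sum(sigmoid(logit) + truth) over all 256 elements in a single persistent
# reduction; tmp17 = 1 - (2*tmp6 + 1) / (tmp10 + 1) is the final scalar loss
# with smooth = 1.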
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [prob, mul, intersection, mul_1, add_1, add, union, add_2, iou, loss], Original ATen: [aten.sigmoid, aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sigmoid_sum_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def IoU(logit, truth, smooth=1):
prob = torch.sigmoid(logit)
intersection = torch.sum(prob * truth)
union = torch.sum(prob + truth)
iou = (2 * intersection + smooth) / (union + smooth)
return iou
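# Note (added for illustration): despite its name, this function computes the
# soft Dice coefficient, since the denominator is sum(prob) + sum(truth)
# rather than the true union. Worked example with hypothetical sums: if
# sum(prob * truth) == 3.0 and sum(prob + truth) == 8.0, then with smooth=1
# the score is (2 * 3.0 + 1) / (8.0 + 1) = 7/9 ~= 0.778, so the loss below
# would be about 0.222.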
class DiceLoss(nn.Module):
def __init__(self, smooth=1):
super(DiceLoss, self).__init__()
self.smooth = smooth
def forward(self, logit, truth):
iou = IoU(logit, truth, self.smooth)
loss = 1 - iou
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp1 + tmp2
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 2.0
tmp12 = tmp6 * tmp11
tmp13 = 1.0
tmp14 = tmp12 + tmp13
tmp15 = tmp10 + tmp13
tmp16 = tmp14 / tmp15
tmp17 = tmp13 - tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sigmoid_sum_0[grid(1)](buf2,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
def IoU(logit, truth, smooth=1):
prob = torch.sigmoid(logit)
intersection = torch.sum(prob * truth)
union = torch.sum(prob + truth)
iou = (2 * intersection + smooth) / (union + smooth)
return iou
class DiceLossNew(nn.Module):
def __init__(self, smooth=1):
super(DiceLossNew, self).__init__()
self.smooth = smooth
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| evilidol/kaggle-Steel-Defect-Detection | DiceLoss | false | 6,666 | ["MIT"] | 1 | 41e3e360f49d706c8c79bcd442342c529648a736 | https://github.com/evilidol/kaggle-Steel-Defect-Detection/tree/41e3e360f49d706c8c79bcd442342c529648a736 | import torch
import torch.nn as nn
def IoU(logit, truth, smooth=1):
prob = torch.sigmoid(logit)
intersection = torch.sum(prob * truth)
union = torch.sum(prob + truth)
iou = (2 * intersection + smooth) / (union + smooth)
return iou
class Model(nn.Module):
def __init__(self, smooth=1):
super().__init__()
self.smooth = smooth
def forward(self, logit, truth):
iou = IoU(logit, truth, self.smooth)
loss = 1 - iou
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
HorizontalMaxPool2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ma/cmamwx5pzfl7ui4xihdrse7u4abzdvpalywvuwdsmh362iq42kjh.py
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class HorizontalMaxPool2d(nn.Module):
def __init__(self):
super(HorizontalMaxPool2d, self).__init__()
def forward(self, x):
inp_size = x.size()
return nn.functional.max_pool2d(input=x, kernel_size=(1, inp_size[3]))
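# Note (added for illustration): with kernel_size=(1, inp_size[3]) the pooling
# window spans the full width, so the output is equivalent to
# x.amax(dim=3, keepdim=True); the compiled kernel below accordingly reduces
# each contiguous row of 4 floats to its maximum.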
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class HorizontalMaxPool2dNew(nn.Module):
def __init__(self):
super(HorizontalMaxPool2dNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| existentmember7/TEMP_monitor | HorizontalMaxPool2d | false | 6,667 | ["MIT"] | 1 | b8116f4c134793c4caa22eda78f90dd24d0cad30 | https://github.com/existentmember7/TEMP_monitor/tree/b8116f4c134793c4caa22eda78f90dd24d0cad30 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
inp_size = x.size()
return nn.functional.max_pool2d(input=x, kernel_size=(1, inp_size[3]))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SpatialGate2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zo/czobpmlyr5atbcpsuque6vcmk7nafmb3smtbzoqilz46drm7zbkm.py
# Topologically Sorted Source Nodes: [cal], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# cal => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/my/cmyzznlnv3aykf56hqxijqkl7hycovubzmkxdtihopwgojtdv2p3.py
# Topologically Sorted Source Nodes: [cal_1, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# cal_1 => sigmoid
# mul => mul
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_3), kwargs = {})
triton_poi_fused_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x3), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [cal], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [cal], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cal_1, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_1.run(buf1, primals_3, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SpatialGate2d(nn.Module):
def __init__(self, in_channels):
super(SpatialGate2d, self).__init__()
self.conv1 = nn.Conv2d(in_channels, 1, kernel_size=1, stride=1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
cal = self.conv1(x)
cal = self.sigmoid(cal)
return cal * x
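# Note (added for readability): the 1x1 convolution collapses the channel axis
# into a single-channel saliency map, the sigmoid bounds it to (0, 1), and
# `cal * x` broadcasts the (N, 1, H, W) gate across all channels, reweighting
# every spatial location of the input.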
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr1 + x3, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_1[grid(256)](buf1, primals_3, buf2,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1
class SpatialGate2dNew(nn.Module):
def __init__(self, in_channels):
super(SpatialGate2dNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels, 1, kernel_size=1, stride=1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| evilidol/kaggle-Steel-Defect-Detection | SpatialGate2d | false | 6,668 | ["MIT"] | 1 | 41e3e360f49d706c8c79bcd442342c529648a736 | https://github.com/evilidol/kaggle-Steel-Defect-Detection/tree/41e3e360f49d706c8c79bcd442342c529648a736 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.conv1 = nn.Conv2d(in_channels, 1, kernel_size=1, stride=1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
cal = self.conv1(x)
cal = self.sigmoid(cal)
return cal * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
DiceCELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/57/c57v5cn6r2wadtitlnuesqzkluaqmskzi7iufshbdr2zb6vlxfjk.py
# Topologically Sorted Source Nodes: [mul, intersection, ground_o, pred_o, ce_loss], Original ATen: [aten.mul, aten.sum, aten._log_softmax]
# Source node to ATen node mapping:
# ce_loss => amax, sub
# ground_o => sum_2
# intersection => sum_1
# mul => mul
# pred_o => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2, 3]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [2, 3]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [2, 3]), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_per_fused__log_softmax_mul_sum_0 = async_compile.triton('triton_per_fused__log_softmax_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x3 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp15 = tl.load(in_ptr1 + (r1 + (64*x3)), xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (16 + r1 + (64*x3)), xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + (32 + r1 + (64*x3)), xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (48 + r1 + (64*x3)), xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tmp1 - tmp21
tl.store(out_ptr3 + (r1 + (16*x0)), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
tl.store(out_ptr2 + (x0), tmp14, xmask)
''', device_str='cuda')
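# Reading the kernel above (comment added for readability): one reduction pass
# per (batch, class) slice produces the three sums the Dice term needs, namely
# tmp6 = sum(target * input), tmp10 = sum(target) (ground_o) and
# tmp14 = sum(input) (pred_o) over the 16 H*W positions, and also writes
# input - amax(input, dim=1), the shifted logits later consumed by the
# cross-entropy's log-softmax.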
# kernel path: runs/run_shard_4/inductor_cache/ym/cymibubc3iuae2afxllp66k3lbonae76vmckohz2qec3hdddvv2q.py
# Topologically Sorted Source Nodes: [mul_1, add_1, neg, denominator, add_2, f, f_1], Original ATen: [aten.mul, aten.add, aten.neg, aten.div, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# denominator => add
# f => div
# f_1 => mean
# mul_1 => mul_1
# neg => neg
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1e-05), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%add_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, %add_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
triton_per_fused_add_div_mean_mul_neg_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_neg_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_neg_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_neg_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp6 = tl.load(in_ptr1 + (r0), None)
tmp7 = tl.load(in_ptr2 + (r0), None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1e-05
tmp4 = tmp2 + tmp3
tmp5 = -tmp4
tmp8 = tmp6 + tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp5 / tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/zt/czt5kjwakvstgp5rdcbmuycvmplutmdgricsre4uwfs3sx6qocde.py
# Topologically Sorted Source Nodes: [mul_1, add_1, neg, denominator, add_2, f, f_1, target, ce_loss, total_loss], Original ATen: [aten.mul, aten.add, aten.neg, aten.div, aten.mean, aten.argmax, aten.nll_loss2d_forward]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# ce_loss => convert_element_type, div_1, full_default_1, ne_1, ne_2, neg_1, sum_5, sum_6, where_1
# denominator => add
# f => div
# f_1 => mean
# mul_1 => mul_1
# neg => neg
# target => argmax
# total_loss => add_3
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1e-05), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%add_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, %add_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
# %argmax : [num_users=4] = call_function[target=torch.ops.aten.argmax.default](args = (%arg1_1, 1), kwargs = {})
# %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%argmax, -100), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %neg_1, %full_default_1), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_1,), kwargs = {})
# %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%argmax, -100), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne_2,), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_5, torch.float32), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, %convert_element_type), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %div_1), kwargs = {})
triton_per_fused_add_argmax_div_mean_mul_neg_nll_loss2d_forward_2 = async_compile.triton('triton_per_fused_add_argmax_div_mean_mul_neg_nll_loss2d_forward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_argmax_div_mean_mul_neg_nll_loss2d_forward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_argmax_div_mean_mul_neg_nll_loss2d_forward_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp17 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp32 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp56 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp58 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp61 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp64 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp79 = tl.load(in_out_ptr0 + (0))
tmp80 = tl.broadcast_to(tmp79, [XBLOCK, 1])
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1, 1], 0, tl.int64)
tmp11 = tl.full([1, 1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1, 1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1, 1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tl.full([1, 1], -100, tl.int64)
tmp48 = tmp46 != tmp47
tmp49 = tl.where(tmp48, tmp46, tmp10)
tmp50 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp51 = tmp49 + tmp50
tmp52 = tmp49 < 0
tmp53 = tl.where(tmp52, tmp51, tmp49)
tl.device_assert((0 <= tmp53) & (tmp53 < 4), "index out of bounds: 0 <= tmp53 < 4")
tmp55 = tl.load(in_ptr1 + (r0 + (16*tmp53) + (64*r1)), None)
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp55 - tmp67
tmp69 = -tmp68
tmp70 = 0.0
tmp71 = tl.where(tmp48, tmp69, tmp70)
tmp72 = tl.broadcast_to(tmp71, [XBLOCK, RBLOCK])
tmp74 = tl.sum(tmp72, 1)[:, None]
tmp75 = tmp48.to(tl.int64)
tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK])
tmp78 = tl.sum(tmp76, 1)[:, None]
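    # tmp80 carries the summed dice terms from the previous kernel, so
    # tmp80 / 16 is their mean; tmp74 / tmp78 is the ignore_index-masked mean
    # NLL. Their sum is the combined Dice + CE loss.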
tmp81 = 16.0
tmp82 = tmp80 / tmp81
tmp83 = tmp78.to(tl.float32)
tmp84 = tmp74 / tmp83
tmp85 = tmp82 + tmp84
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp85, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, intersection, ground_o, pred_o, ce_loss], Original ATen: [aten.mul, aten.sum, aten._log_softmax]
stream0 = get_raw_stream(0)
triton_per_fused__log_softmax_mul_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf2, buf5, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, add_1, neg, denominator, add_2, f, f_1], Original ATen: [aten.mul, aten.add, aten.neg, aten.div, aten.mean]
triton_per_fused_add_div_mean_mul_neg_1.run(buf0, buf1, buf2, buf3, 1, 16, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
buf8 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [mul_1, add_1, neg, denominator, add_2, f, f_1, target, ce_loss, total_loss], Original ATen: [aten.mul, aten.add, aten.neg, aten.div, aten.mean, aten.argmax, aten.nll_loss2d_forward]
triton_per_fused_add_argmax_div_mean_mul_neg_nll_loss2d_forward_2.run(buf8, arg1_1, buf5, 1, 64, grid=grid(1), stream=stream0)
del arg1_1
del buf5
return (buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import warnings
from typing import Callable
from typing import Union
from typing import Optional
from enum import Enum
import torch.nn as nn
from torch.nn.modules.loss import _Loss
import torch.multiprocessing
class LossReduction(Enum):
"""
See also:
- :py:class:`monai.losses.dice.DiceLoss`
- :py:class:`monai.losses.dice.GeneralizedDiceLoss`
- :py:class:`monai.losses.focal_loss.FocalLoss`
- :py:class:`monai.losses.tversky.TverskyLoss`
"""
NONE = 'none'
MEAN = 'mean'
SUM = 'sum'
SUM_MEAN = 'sum_mean'
class DiceLoss(_Loss):
"""
    Compute average Dice loss between two tensors. It can support both multi-class and multi-label tasks.
Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).
Axis N of `input` is expected to have logit predictions for each class rather than being image channels,
while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are
    values added to the intersection and union components of the intersection-over-union calculation to smooth results,
    respectively; these values should be small. The `include_background` class attribute can be set to False for
an instance of DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be
background. If the non-background segmentations are small compared to the total image size they can get
overwhelmed by the signal from the background so excluding it in such cases helps convergence.
    Milletari, F. et al. (2016) V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation, 3DV, 2016.
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act:
'Optional[Callable]'=None, squared_pred: 'bool'=False, jaccard:
'bool'=False, reduction: 'Union[LossReduction, str]'=LossReduction.
MEAN, smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05, batch:
'bool'=False, my_dice: 'bool'=False) ->None:
"""
Args:
include_background: if False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: if True, apply a sigmoid function to the prediction.
softmax: if True, apply a softmax function to the prediction.
            other_act: if you don't want to use `sigmoid` or `softmax`, pass another callable to apply
                a different activation layer. Defaults to ``None``. For example:
`other_act = torch.tanh`.
squared_pred: use squared versions of targets and predictions in the denominator or not.
jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
Defaults to False, a Dice loss value is computed independently from each item in the batch
before any `reduction`.
Raises:
TypeError: When ``other_act`` is not an ``Optional[Callable]``.
ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
Incompatible values.
"""
super().__init__(reduction=LossReduction(reduction).value)
if other_act is not None and not callable(other_act):
raise TypeError(
f'other_act must be None or callable but is {type(other_act).__name__}.'
)
if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
raise ValueError(
'Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].'
)
self.include_background = include_background
self.to_onehot_y = to_onehot_y
self.sigmoid = sigmoid
self.softmax = softmax
self.other_act = other_act
self.squared_pred = squared_pred
self.jaccard = jaccard
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
self.batch = batch
self.my_dice = my_dice
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
"""
Args:
input: the shape should be BNH[WD].
target: the shape should be BNH[WD].
Raises:
ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
"""
if self.sigmoid:
input = torch.sigmoid(input)
n_pred_ch = input.shape[1]
if self.softmax:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `softmax=True` ignored.')
else:
input = torch.softmax(input, 1)
if self.other_act is not None:
input = self.other_act(input)
if self.to_onehot_y:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `to_onehot_y=True` ignored.')
else:
target = one_hot(target, num_classes=n_pred_ch)
assert target.shape == input.shape, f'ground truth has differing shape ({target.shape}) from input ({input.shape})'
reduce_axis = list(range(2, len(input.shape)))
if self.batch:
reduce_axis = [0] + reduce_axis
intersection = torch.sum(target * input, dim=reduce_axis)
if self.squared_pred:
target = torch.pow(target, 2)
input = torch.pow(input, 2)
ground_o = torch.sum(target, dim=reduce_axis)
pred_o = torch.sum(input, dim=reduce_axis)
denominator = ground_o + pred_o
if self.jaccard:
denominator = 2.0 * (denominator - intersection)
if self.my_dice:
def count_nonzeros(input, dim):
mask = input != 0.0
return torch.sum(mask, dim)
f: 'torch.Tensor' = 2.0 * intersection / (denominator + self.
smooth_nr)
if not self.include_background:
if self.batch:
f = f[1:]
ground_o = ground_o[1:]
else:
f = f[:, 1:]
ground_o = ground_o[:, 1:]
f = -(torch.sum(f, dim=-1) / count_nonzeros(ground_o, dim=-1))
else:
f: 'torch.Tensor' = -(2.0 * intersection + self.smooth_nr) / (
denominator + self.smooth_dr)
if not self.include_background:
if self.batch:
f = f[1:]
else:
f = f[:, 1:]
if self.reduction == LossReduction.MEAN.value:
f = torch.mean(f)
elif self.reduction == LossReduction.SUM.value:
f = torch.sum(f)
elif self.reduction == LossReduction.NONE.value:
pass
else:
raise ValueError(
f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].'
)
return f
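# Hedged usage sketch (illustrative; not part of the original module): a
# minimal call of DiceLoss on random 4-class 2D data. `to_onehot_y` is left
# False because `one_hot` is not defined in this extracted file (upstream it
# comes from monai.networks.utils). The name `demo_dice_loss` and the shapes
# below are assumptions, not from the source.
def demo_dice_loss():
    loss_fn = DiceLoss(softmax=True)
    logits = torch.randn(2, 4, 8, 8)  # B, N, H, W logits
    target = torch.nn.functional.one_hot(
        torch.randint(0, 4, (2, 8, 8)), num_classes=4
    ).permute(0, 3, 1, 2).float()  # one-hot target, B, N, H, W
    return loss_fn(logits, target)  # scalar mean of -(2I + s) / (G + P + s)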
class DiceCELoss(_Loss):
"""
Compute both Dice loss and Cross Entropy Loss, and return the sum of these two losses.
Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).
Axis N of `input` is expected to have logit predictions for each class rather than being image channels,
while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are
    values added for the dice loss part to the intersection and union components of the intersection-over-union calculation
    to smooth results, respectively; these values should be small. The `include_background` class attribute can be
set to False for an instance of the loss to exclude the first category (channel index 0) which is by convention
assumed to be background. If the non-background segmentations are small compared to the total image size they can get
overwhelmed by the signal from the background so excluding it in such cases helps convergence.
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act:
'Optional[Callable]'=None, squared_pred: 'bool'=False, jaccard:
'bool'=False, reduction: 'str'='mean', smooth_nr: 'float'=1e-05,
smooth_dr: 'float'=1e-05, batch: 'bool'=False, ce_weight:
'Optional[torch.Tensor]'=None, negate_dice: 'bool'=False, my_dice:
'bool'=False) ->None:
"""
Args:
``ce_weight`` is only used for cross entropy loss, ``reduction`` is used for both losses and other
parameters are only used for dice loss.
include_background: if False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: if True, apply a sigmoid function to the prediction.
softmax: if True, apply a softmax function to the prediction.
            other_act: if you don't want to use `sigmoid` or `softmax`, pass another callable to apply
                a different activation layer. Defaults to ``None``. For example:
`other_act = torch.tanh`.
squared_pred: use squared versions of targets and predictions in the denominator or not.
jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
reduction: {``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``. The dice loss should
                at least reduce the spatial dimensions, which is different from cross entropy loss, thus here
the ``none`` option cannot be used.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
Defaults to False, a Dice loss value is computed independently from each item in the batch
before any `reduction`.
ce_weight: a rescaling weight given to each class for cross entropy loss.
See ``torch.nn.CrossEntropyLoss()`` for more information.
"""
super().__init__()
self.dice = DiceLoss(include_background=include_background,
to_onehot_y=to_onehot_y, sigmoid=sigmoid, softmax=softmax,
other_act=other_act, squared_pred=squared_pred, jaccard=jaccard,
reduction=reduction, smooth_nr=smooth_nr, smooth_dr=smooth_dr,
batch=batch, my_dice=my_dice)
self.cross_entropy = nn.CrossEntropyLoss(weight=ce_weight,
reduction=reduction)
self.negate_dice = negate_dice
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
"""
Args:
input: the shape should be BNH[WD].
target: the shape should be BNH[WD] or B1H[WD].
Raises:
ValueError: When number of dimensions for input and target are different.
            ValueError: When the number of channels for target is neither 1 nor the same as input.
"""
if len(input.shape) != len(target.shape):
raise ValueError(
'the number of dimensions for input and target should be the same.'
)
dice_loss = self.dice(input, target)
if self.negate_dice:
dice_loss = dice_loss * -1
n_pred_ch, n_target_ch = input.shape[1], target.shape[1]
if n_pred_ch == n_target_ch:
target = torch.argmax(target, dim=1)
else:
target = torch.squeeze(target, dim=1)
target = target.long()
ce_loss = self.cross_entropy(input, target)
total_loss: 'torch.Tensor' = dice_loss + ce_loss
return total_loss
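# Hedged sketch (illustrative; not part of the original module): DiceCELoss
# adds nn.CrossEntropyLoss on the argmax-decoded target to the (negative)
# dice term, mirroring the 4x4x4x4 inputs used by get_inputs() below. The
# name `demo_dice_ce_loss` is an assumption.
def demo_dice_ce_loss():
    loss_fn = DiceCELoss()
    pred = torch.rand(4, 4, 4, 4)    # logits, B, N, H, W
    target = torch.rand(4, 4, 4, 4)  # same-shape target, argmax-decoded for CE
    return loss_fn(pred, target)     # scalar: dice_loss + ce_loss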
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import warnings
from typing import Callable
from typing import Union
from typing import Optional
from enum import Enum
import torch.nn as nn
from torch.nn.modules.loss import _Loss
import torch.multiprocessing
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__log_softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp15 = tl.load(in_ptr1 + (r1 + 64 * x3), xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (16 + r1 + 64 * x3), xmask, eviction_policy=
'evict_last', other=0.0)
tmp18 = tl.load(in_ptr1 + (32 + r1 + 64 * x3), xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (48 + r1 + 64 * x3), xmask, eviction_policy=
'evict_last', other=0.0)
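    # Fused per-(batch, class) reductions: sum(target * pred) (the dice
    # intersection), sum(target) (ground_o), sum(pred) (pred_o), plus the
    # max-shifted logits written to out_ptr3 as the stable log-softmax stage.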
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tmp1 - tmp21
tl.store(out_ptr3 + (r1 + 16 * x0), tmp22, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_neg_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_ptr1 + r0, None)
tmp7 = tl.load(in_ptr2 + r0, None)
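    # Per-(batch, class) dice term -(2 * intersection + 1e-05) /
    # (ground_o + pred_o + 1e-05), summed over all 16 rows; the division by
    # 16 that completes the mean is deferred to the next kernel.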
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1e-05
tmp4 = tmp2 + tmp3
tmp5 = -tmp4
tmp8 = tmp6 + tmp7
tmp9 = tmp8 + tmp3
tmp10 = tmp5 / tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
@triton.jit
def triton_per_fused_add_argmax_div_mean_mul_neg_nll_loss2d_forward_2(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp17 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp32 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp56 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp58 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp61 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp64 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp79 = tl.load(in_out_ptr0 + 0)
tmp80 = tl.broadcast_to(tmp79, [XBLOCK, 1])
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1, 1], 0, tl.int64)
tmp11 = tl.full([1, 1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1, 1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1, 1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tl.full([1, 1], -100, tl.int64)
tmp48 = tmp46 != tmp47
tmp49 = tl.where(tmp48, tmp46, tmp10)
tmp50 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp51 = tmp49 + tmp50
tmp52 = tmp49 < 0
tmp53 = tl.where(tmp52, tmp51, tmp49)
tl.device_assert((0 <= tmp53) & (tmp53 < 4),
'index out of bounds: 0 <= tmp53 < 4')
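    # Gather the max-shifted logit of the argmax class, subtract the
    # log-sum-exp over the four channels to get its log-softmax value, and
    # negate it to obtain the per-pixel NLL contribution.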
tmp55 = tl.load(in_ptr1 + (r0 + 16 * tmp53 + 64 * r1), None)
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp55 - tmp67
tmp69 = -tmp68
tmp70 = 0.0
tmp71 = tl.where(tmp48, tmp69, tmp70)
tmp72 = tl.broadcast_to(tmp71, [XBLOCK, RBLOCK])
tmp74 = tl.sum(tmp72, 1)[:, None]
tmp75 = tmp48.to(tl.int64)
tmp76 = tl.broadcast_to(tmp75, [XBLOCK, RBLOCK])
tmp78 = tl.sum(tmp76, 1)[:, None]
tmp81 = 16.0
tmp82 = tmp80 / tmp81
tmp83 = tmp78.to(tl.float32)
tmp84 = tmp74 / tmp83
tmp85 = tmp82 + tmp84
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp85, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__log_softmax_mul_sum_0[grid(16)](arg1_1, arg0_1,
buf0, buf1, buf2, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1
)
del arg0_1
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_add_div_mean_mul_neg_1[grid(1)](buf0, buf1, buf2,
buf3, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
buf8 = buf3
del buf3
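        # buf8 aliases buf3's storage: the final kernel updates the scalar
        # dice sum in place, adding the masked-mean cross-entropy term.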
triton_per_fused_add_argmax_div_mean_mul_neg_nll_loss2d_forward_2[grid
(1)](buf8, arg1_1, buf5, 1, 64, XBLOCK=1, num_warps=2, num_stages=1
)
del arg1_1
del buf5
return buf8,
class LossReduction(Enum):
"""
See also:
- :py:class:`monai.losses.dice.DiceLoss`
- :py:class:`monai.losses.dice.GeneralizedDiceLoss`
- :py:class:`monai.losses.focal_loss.FocalLoss`
- :py:class:`monai.losses.tversky.TverskyLoss`
"""
NONE = 'none'
MEAN = 'mean'
SUM = 'sum'
SUM_MEAN = 'sum_mean'
class DiceLoss(_Loss):
"""
    Compute average Dice loss between two tensors. It can support both multi-class and multi-label tasks.
Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).
Axis N of `input` is expected to have logit predictions for each class rather than being image channels,
while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are
    values added to the intersection and union components of the intersection-over-union calculation to smooth results,
    respectively; these values should be small. The `include_background` class attribute can be set to False for
an instance of DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be
background. If the non-background segmentations are small compared to the total image size they can get
overwhelmed by the signal from the background so excluding it in such cases helps convergence.
    Milletari, F. et al. (2016) V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation, 3DV, 2016.
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act:
'Optional[Callable]'=None, squared_pred: 'bool'=False, jaccard:
'bool'=False, reduction: 'Union[LossReduction, str]'=LossReduction.
MEAN, smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05, batch:
'bool'=False, my_dice: 'bool'=False) ->None:
"""
Args:
include_background: if False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: if True, apply a sigmoid function to the prediction.
softmax: if True, apply a softmax function to the prediction.
            other_act: if you don't want to use `sigmoid` or `softmax`, pass another callable to apply
                a different activation layer. Defaults to ``None``. For example:
`other_act = torch.tanh`.
squared_pred: use squared versions of targets and predictions in the denominator or not.
jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
Defaults to False, a Dice loss value is computed independently from each item in the batch
before any `reduction`.
Raises:
TypeError: When ``other_act`` is not an ``Optional[Callable]``.
ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
Incompatible values.
"""
super().__init__(reduction=LossReduction(reduction).value)
if other_act is not None and not callable(other_act):
raise TypeError(
f'other_act must be None or callable but is {type(other_act).__name__}.'
)
if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
raise ValueError(
'Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].'
)
self.include_background = include_background
self.to_onehot_y = to_onehot_y
self.sigmoid = sigmoid
self.softmax = softmax
self.other_act = other_act
self.squared_pred = squared_pred
self.jaccard = jaccard
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
self.batch = batch
self.my_dice = my_dice
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
"""
Args:
input: the shape should be BNH[WD].
target: the shape should be BNH[WD].
Raises:
ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
"""
if self.sigmoid:
input = torch.sigmoid(input)
n_pred_ch = input.shape[1]
if self.softmax:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `softmax=True` ignored.')
else:
input = torch.softmax(input, 1)
if self.other_act is not None:
input = self.other_act(input)
if self.to_onehot_y:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `to_onehot_y=True` ignored.')
else:
target = one_hot(target, num_classes=n_pred_ch)
assert target.shape == input.shape, f'ground truth has differing shape ({target.shape}) from input ({input.shape})'
reduce_axis = list(range(2, len(input.shape)))
if self.batch:
reduce_axis = [0] + reduce_axis
intersection = torch.sum(target * input, dim=reduce_axis)
if self.squared_pred:
target = torch.pow(target, 2)
input = torch.pow(input, 2)
ground_o = torch.sum(target, dim=reduce_axis)
pred_o = torch.sum(input, dim=reduce_axis)
denominator = ground_o + pred_o
if self.jaccard:
denominator = 2.0 * (denominator - intersection)
if self.my_dice:
def count_nonzeros(input, dim):
mask = input != 0.0
return torch.sum(mask, dim)
f: 'torch.Tensor' = 2.0 * intersection / (denominator + self.
smooth_nr)
if not self.include_background:
if self.batch:
f = f[1:]
ground_o = ground_o[1:]
else:
f = f[:, 1:]
ground_o = ground_o[:, 1:]
f = -(torch.sum(f, dim=-1) / count_nonzeros(ground_o, dim=-1))
else:
f: 'torch.Tensor' = -(2.0 * intersection + self.smooth_nr) / (
denominator + self.smooth_dr)
if not self.include_background:
if self.batch:
f = f[1:]
else:
f = f[:, 1:]
if self.reduction == LossReduction.MEAN.value:
f = torch.mean(f)
elif self.reduction == LossReduction.SUM.value:
f = torch.sum(f)
elif self.reduction == LossReduction.NONE.value:
pass
else:
raise ValueError(
f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].'
)
return f
class DiceCELossNew(_Loss):
"""
Compute both Dice loss and Cross Entropy Loss, and return the sum of these two losses.
Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).
Axis N of `input` is expected to have logit predictions for each class rather than being image channels,
while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are
    values added for the dice loss part to the intersection and union components of the intersection-over-union calculation
    to smooth results, respectively; these values should be small. The `include_background` class attribute can be
set to False for an instance of the loss to exclude the first category (channel index 0) which is by convention
assumed to be background. If the non-background segmentations are small compared to the total image size they can get
overwhelmed by the signal from the background so excluding it in such cases helps convergence.
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act:
'Optional[Callable]'=None, squared_pred: 'bool'=False, jaccard:
'bool'=False, reduction: 'str'='mean', smooth_nr: 'float'=1e-05,
smooth_dr: 'float'=1e-05, batch: 'bool'=False, ce_weight:
'Optional[torch.Tensor]'=None, negate_dice: 'bool'=False, my_dice:
'bool'=False) ->None:
"""
Args:
``ce_weight`` is only used for cross entropy loss, ``reduction`` is used for both losses and other
parameters are only used for dice loss.
include_background: if False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: if True, apply a sigmoid function to the prediction.
softmax: if True, apply a softmax function to the prediction.
            other_act: if you don't want to use `sigmoid` or `softmax`, pass another callable to apply
                a different activation layer. Defaults to ``None``. For example:
`other_act = torch.tanh`.
squared_pred: use squared versions of targets and predictions in the denominator or not.
jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
reduction: {``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``. The dice loss should
                at least reduce the spatial dimensions, which is different from cross entropy loss, thus here
the ``none`` option cannot be used.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
Defaults to False, a Dice loss value is computed independently from each item in the batch
before any `reduction`.
ce_weight: a rescaling weight given to each class for cross entropy loss.
See ``torch.nn.CrossEntropyLoss()`` for more information.
"""
super().__init__()
self.dice = DiceLoss(include_background=include_background,
to_onehot_y=to_onehot_y, sigmoid=sigmoid, softmax=softmax,
other_act=other_act, squared_pred=squared_pred, jaccard=jaccard,
reduction=reduction, smooth_nr=smooth_nr, smooth_dr=smooth_dr,
batch=batch, my_dice=my_dice)
self.cross_entropy = nn.CrossEntropyLoss(weight=ce_weight,
reduction=reduction)
self.negate_dice = negate_dice
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
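# Hedged sketch (illustrative; not part of the original module): the compiled
# wrapper only accepts contiguous CUDA float32 tensors of exactly (4, 4, 4, 4),
# the shape hard-coded by assert_size_stride in call() above. The name
# `demo_dice_ce_loss_new` is an assumption.
def demo_dice_ce_loss_new():
    loss_fn = DiceCELossNew()
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    return loss_fn(pred, target)  # 0-dim CUDA tensor with the total loss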
| elitap/classimbalance | DiceCELoss | false | 6,669 | [
"Apache-2.0"
] | 1 | ae807ec533da5eef18f4180b29383399bc57696a | https://github.com/elitap/classimbalance/tree/ae807ec533da5eef18f4180b29383399bc57696a | import torch
import warnings
from typing import Callable
from typing import Union
from typing import Optional
from enum import Enum
import torch.nn as nn
from torch.nn.modules.loss import _Loss
import torch.multiprocessing
class LossReduction(Enum):
"""
See also:
- :py:class:`monai.losses.dice.DiceLoss`
- :py:class:`monai.losses.dice.GeneralizedDiceLoss`
- :py:class:`monai.losses.focal_loss.FocalLoss`
- :py:class:`monai.losses.tversky.TverskyLoss`
"""
NONE = 'none'
MEAN = 'mean'
SUM = 'sum'
SUM_MEAN = 'sum_mean'
class DiceLoss(_Loss):
"""
    Compute average Dice loss between two tensors. It can support both multi-class and multi-label tasks.
Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).
Axis N of `input` is expected to have logit predictions for each class rather than being image channels,
while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are
    values added to the intersection and union components of the intersection-over-union calculation to smooth results,
    respectively; these values should be small. The `include_background` class attribute can be set to False for
an instance of DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be
background. If the non-background segmentations are small compared to the total image size they can get
overwhelmed by the signal from the background so excluding it in such cases helps convergence.
    Milletari, F. et al. (2016) V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation, 3DV, 2016.
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act:
'Optional[Callable]'=None, squared_pred: 'bool'=False, jaccard:
'bool'=False, reduction: 'Union[LossReduction, str]'=LossReduction.
MEAN, smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05, batch:
'bool'=False, my_dice: 'bool'=False) ->None:
"""
Args:
include_background: if False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: if True, apply a sigmoid function to the prediction.
softmax: if True, apply a softmax function to the prediction.
            other_act: if you don't want to use `sigmoid` or `softmax`, pass another callable to apply
                a different activation layer. Defaults to ``None``. For example:
`other_act = torch.tanh`.
squared_pred: use squared versions of targets and predictions in the denominator or not.
jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
Defaults to False, a Dice loss value is computed independently from each item in the batch
before any `reduction`.
Raises:
TypeError: When ``other_act`` is not an ``Optional[Callable]``.
ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
Incompatible values.
"""
# ... truncated (>4000 chars) for memory efficiency |
BiasConvFc2Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3u/c3u6zmp5plplv5tjyzlxet5sgcucpeizysbhi7xphxjhdc6kmodq.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5b/c5br3r4gpi7zzaygqfdgcqeerwiekt2d2t2wkw4sj54lam6radgq.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_2 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
return (reinterpret_tensor(buf5, (4, 1, 1), (1, 1, 1), 0), primals_1, primals_2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), buf3, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BiasConvFc2Net(nn.Module):
def __init__(self, in_channels, groups, n_segment, kernel_size=3, padding=1
):
super(BiasConvFc2Net, self).__init__()
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc = nn.Linear(n_segment, n_segment)
self.relu = nn.ReLU()
self.lastlayer = nn.Linear(n_segment, groups)
def forward(self, x):
N, _C, T = x.shape
x = self.conv(x)
x = x.view(N, T)
x = self.relu(self.fc(x))
x = self.lastlayer(x)
x = x.view(N, 1, -1)
return x
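# Hedged sketch (illustrative; not part of the original module): with
# in_channels=4, groups=1, n_segment=4 the module maps (N, C, T) features to
# a (N, 1, groups) gating output. The name `demo_bias_conv_fc2` is an
# assumption.
def demo_bias_conv_fc2():
    net = BiasConvFc2Net(in_channels=4, groups=1, n_segment=4)
    out = net(torch.rand(4, 4, 4))  # N=4, C=4, T=4
    return out.shape  # torch.Size([4, 1, 1])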
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'groups': 1, 'n_segment': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(16)](buf3, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 1, 1), (1, 1, 1), 0
), primals_1, primals_2, reinterpret_tensor(buf1, (4, 4), (4, 1), 0
), buf3, primals_6, primals_4
class BiasConvFc2NetNew(nn.Module):
def __init__(self, in_channels, groups, n_segment, kernel_size=3, padding=1
):
super(BiasConvFc2NetNew, self).__init__()
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc = nn.Linear(n_segment, n_segment)
self.relu = nn.ReLU()
self.lastlayer = nn.Linear(n_segment, groups)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_4 = self.fc.weight
primals_5 = self.fc.bias
primals_6 = self.lastlayer.weight
primals_7 = self.lastlayer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
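# Hedged sketch (illustrative; not part of the original module): the compiled
# wrapper assumes a single contiguous (4, 4, 4) CUDA float32 input, per the
# assert_size_stride checks in call() above. The name
# `demo_bias_conv_fc2_new` is an assumption.
def demo_bias_conv_fc2_new():
    net = BiasConvFc2NetNew(in_channels=4, groups=1, n_segment=4).cuda()
    out = net(torch.rand(4, 4, 4, device='cuda'))
    return out.shape  # torch.Size([4, 1, 1])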
| eynaij/X-Temporal_catdim | BiasConvFc2Net | false | 6,670 | [
"MIT"
] | 1 | 6a2efba407c09c83ca061c8467c1373b6ed0c7eb | https://github.com/eynaij/X-Temporal_catdim/tree/6a2efba407c09c83ca061c8467c1373b6ed0c7eb | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_channels, groups, n_segment, kernel_size=3, padding=1
):
super().__init__()
self.conv = nn.Conv1d(in_channels, 1, kernel_size, padding=padding)
self.fc = nn.Linear(n_segment, n_segment)
self.relu = nn.ReLU()
self.lastlayer = nn.Linear(n_segment, groups)
def forward(self, x):
N, _C, T = x.shape
x = self.conv(x)
x = x.view(N, T)
x = self.relu(self.fc(x))
x = self.lastlayer(x)
x = x.view(N, 1, -1)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 1, 4]
|
SCse | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [z_2], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# z_2 => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_2, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
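    # Global average pool over the 16 spatial positions: the "squeeze" step
    # of the channel squeeze-and-excitation branch.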
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ad/cadccuyhl7stcp3nyqfgohiwbiv5ckfzxsye27ithwsill6dvmh4.py
# Topologically Sorted Source Nodes: [conv2d_1, z_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# z_3 => relu
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_5, %primals_6, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ja/cjafhv3pz4ruylypa4fzgtmsu3ji2wfc7gvhxz7bbv527bxiebxd.py
# Topologically Sorted Source Nodes: [z_1, mul, z_4, mul_1, add], Original ATen: [aten.sigmoid, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# mul_1 => mul_1
# z_1 => sigmoid
# z_4 => sigmoid_1
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_2,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
x4 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tl.sigmoid(tmp4)
tmp6 = tmp0 * tmp5
tmp7 = tmp3 + tmp6
tl.store(out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
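# Eager-mode sketch of the fused kernel above, which combines both SCse
# attention branches in one pass over the input (a reference, not the
# compiled path):
def _scse_reference(x, spatial_logits, channel_logits):
    # x: (4, 4, 4, 4); spatial_logits: (4, 1, 4, 4) from the 1x1 squeeze
    # conv; channel_logits: (4, 4, 1, 1) from the channel-attention branch.
    return x * torch.sigmoid(spatial_logits) + x * torch.sigmoid(channel_logits)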
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [z_2], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf2, primals_2, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 1, 1), (1, 1, 1, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, z_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf4, primals_4, 4, grid=grid(4), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf6, primals_6, 16, grid=grid(16), stream=stream0)
del primals_6
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [z_1, mul, z_4, mul_1, add], Original ATen: [aten.sigmoid, aten.mul, aten.add]
triton_poi_fused_add_mul_sigmoid_3.run(primals_2, buf0, buf6, buf7, 256, grid=grid(256), stream=stream0)
return (buf7, primals_1, primals_2, primals_3, primals_5, buf0, buf2, buf4, buf6, )
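# Note: buf7 is the module output; the other tensors in the tuple returned
# by call() appear to be inputs and intermediates kept alive for a potential
# backward pass (an inference about inductor's forward-graph layout, not
# part of the generated comments).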
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GAB(nn.Module):
def __init__(self, input_dim, reduction=4):
super(GAB, self).__init__()
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(input_dim, input_dim // reduction,
kernel_size=1, stride=1)
self.conv2 = nn.Conv2d(input_dim // reduction, input_dim,
kernel_size=1, stride=1)
self.relu = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
z = self.global_avgpool(x)
z = self.relu(self.conv1(z))
z = self.sigmoid(self.conv2(z))
return x * z
class SpatialAttention2d(nn.Module):
def __init__(self, channel):
super(SpatialAttention2d, self).__init__()
self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
z = self.squeeze(x)
z = self.sigmoid(z)
return x * z
class SCse(nn.Module):
def __init__(self, dim):
super(SCse, self).__init__()
self.satt = SpatialAttention2d(dim)
self.catt = GAB(dim)
def forward(self, x):
return self.satt(x) + self.catt(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
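# Usage sketch for the module above, with the shapes that get_inputs and
# get_init_inputs describe:
#   m = SCse(dim=4)
#   y = m(torch.rand(4, 4, 4, 4))  # output has the same shape as the input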
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
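# The kernel below implements the global average pool: each program averages
# the 16 spatial positions of one (batch, channel) pair, which is what
# nn.AdaptiveAvgPool2d(1) does for the 4x4 feature maps used here.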
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x4 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tl.sigmoid(tmp4)
tmp6 = tmp0 * tmp5
tmp7 = tmp3 + tmp6
tl.store(out_ptr0 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf1
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf2, primals_2, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 1, 1), (1, 1, 1, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_relu_1[grid(4)](buf4, primals_4, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del primals_4
buf5 = extern_kernels.convolution(buf4, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_2[grid(16)](buf6, primals_6, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_6
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_3[grid(256)](primals_2, buf0, buf6,
buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
return (buf7, primals_1, primals_2, primals_3, primals_5, buf0, buf2,
buf4, buf6)
class GAB(nn.Module):
def __init__(self, input_dim, reduction=4):
super(GAB, self).__init__()
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(input_dim, input_dim // reduction,
kernel_size=1, stride=1)
self.conv2 = nn.Conv2d(input_dim // reduction, input_dim,
kernel_size=1, stride=1)
self.relu = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
z = self.global_avgpool(x)
z = self.relu(self.conv1(z))
z = self.sigmoid(self.conv2(z))
return x * z
class SpatialAttention2d(nn.Module):
def __init__(self, channel):
super(SpatialAttention2d, self).__init__()
self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
z = self.squeeze(x)
z = self.sigmoid(z)
return x * z
class SCseNew(nn.Module):
def __init__(self, dim):
super(SCseNew, self).__init__()
self.satt = SpatialAttention2d(dim)
self.catt = GAB(dim)
def forward(self, input_0):
primals_1 = self.satt.squeeze.weight
primals_3 = self.catt.conv1.weight
primals_4 = self.catt.conv1.bias
primals_5 = self.catt.conv2.weight
primals_6 = self.catt.conv2.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
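# SCseNew is the compiled counterpart of SCse: forward() routes the module's
# own weights plus the input into call() and returns the first element of the
# resulting tuple (the fused output). With identical weights, SCse and
# SCseNew should agree on the same input up to floating-point tolerance.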
| evilidol/kaggle-Steel-Defect-Detection | SCse | false | 6,671 | ["MIT"] | 1 | 41e3e360f49d706c8c79bcd442342c529648a736 | https://github.com/evilidol/kaggle-Steel-Defect-Detection/tree/41e3e360f49d706c8c79bcd442342c529648a736 | import torch
import torch.nn as nn
class GAB(nn.Module):
def __init__(self, input_dim, reduction=4):
super().__init__()
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(input_dim, input_dim // reduction,
kernel_size=1, stride=1)
self.conv2 = nn.Conv2d(input_dim // reduction, input_dim,
kernel_size=1, stride=1)
self.relu = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
z = self.global_avgpool(x)
z = self.relu(self.conv1(z))
z = self.sigmoid(self.conv2(z))
return x * z
class SpatialAttention2d(nn.Module):
def __init__(self, channel):
super().__init__()
self.squeeze = nn.Conv2d(channel, 1, kernel_size=1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
z = self.squeeze(x)
z = self.sigmoid(z)
return x * z
class Model(nn.Module):
def __init__(self, dim):
super().__init__()
self.satt = SpatialAttention2d(dim)
self.catt = GAB(dim)
def forward(self, x):
return self.satt(x) + self.catt(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
BertAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
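# Note: the 1.0 multiplier above is the folded attention scale
# 1/sqrt(attention_head_size); with hidden_size=4 and num_attention_heads=4
# (see get_init_inputs below) the head size is 1, so the scale is exactly 1.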
# kernel path: runs/run_shard_4/inductor_cache/iz/ciztqj6kop3hxov46yrmzprkzfir3eljcic4mkqznz2j5cfeaudr.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_tensor, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = float("-inf")
tmp27 = tmp2 == tmp26
tmp28 = tmp27 == 0
tmp29 = tmp28.to(tl.int64)
tmp30 = (tmp29 != 0)
tmp31 = tmp5 == tmp26
tmp32 = tmp31 == 0
tmp33 = tmp32.to(tl.int64)
tmp34 = (tmp33 != 0)
tmp35 = tmp30 | tmp34
tmp36 = tmp9 == tmp26
tmp37 = tmp36 == 0
tmp38 = tmp37.to(tl.int64)
tmp39 = (tmp38 != 0)
tmp40 = tmp35 | tmp39
tmp41 = tmp13 == tmp26
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = (tmp43 != 0)
tmp45 = tmp40 | tmp44
tl.store(out_ptr0 + (x2), tmp14, xmask)
tl.store(out_ptr1 + (x2), tmp25, xmask)
tl.store(out_ptr2 + (x2), tmp45, xmask)
''', device_str='cuda')
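# Eager-mode sketch of the kernel above (a reference, not the compiled
# path): per softmax row of the masked attention scores it produces the
# running max, the exp-sum denominator, and a flag marking rows that have
# at least one entry that is not -inf.
def _softmax_stats_reference(scores):
    amax = scores.amax(-1, keepdim=True)
    denom = (scores - amax).exp().sum(-1, keepdim=True)
    any_valid = (scores != float('-inf')).any(-1, keepdim=True)
    return amax, denom, any_valid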
# kernel path: runs/run_shard_4/inductor_cache/x5/cx5uvbfethxuwwkwxf3xaualzhlcwqsz4jxqpbhintggaypzjwqf.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + (x4), xmask)
tmp3 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp1, tmp10, tmp9)
tl.store(in_out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')
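# Eager-mode sketch of the kernel above: it finishes the softmax using the
# stats from the previous kernel and zeroes rows whose entries were all
# -inf, so fully masked rows yield 0 instead of NaN.
def _masked_softmax_reference(scores, amax, denom, any_valid):
    probs = (scores - amax).exp() / denom
    return torch.where(any_valid, probs, torch.zeros_like(probs))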
# kernel path: runs/run_shard_4/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hk/chkirlrxzb52fxbrq2rynamgt7aligt77yn6j6ihfk46whjvd374.py
# Topologically Sorted Source Nodes: [add_1, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow]
# Source node to ATen node mapping:
# add_1 => add_1
# pow_1 => pow_1
# s => mean_1
# sub => sub_1
# u => mean
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
triton_poi_fused_add_mean_pow_sub_5 = async_compile.triton('triton_poi_fused_add_mean_pow_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_pow_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_pow_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
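# Eager-mode sketch of the kernel above: the statistics half of the TF-style
# BertLayerNorm, computed over (dense output + residual) along the last dim.
def _layernorm_stats_reference(hidden, residual):
    x = hidden + residual
    u = x.mean(-1, keepdim=True)
    s = (x - u).pow(2).mean(-1, keepdim=True)  # biased variance
    return u, s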
# kernel path: runs/run_shard_4/inductor_cache/3j/c3junievyif7korlhvw3ftme6dpzx2uodc43q34sexam3chrqpdb.py
# Topologically Sorted Source Nodes: [add_1, u, sub, add_2, sqrt, x_3, mul, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# hidden_states_2 => add_3
# mul => mul
# sqrt => sqrt
# sub => sub_1
# u => mean
# x_3 => div_2
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_2,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_11, %div_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_12), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_sub_6 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-12
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
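# Eager-mode sketch of the kernel above: the normalization half of
# BertLayerNorm, using variance_epsilon = 1e-12 as in the Python module.
def _layernorm_apply_reference(hidden, residual, u, s, gamma, beta):
    x = (hidden + residual - u) / torch.sqrt(s + 1e-12)
    return gamma * x + beta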
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, primals_8, buf6, buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf9, buf8, primals_8, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf8
del primals_8
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_7, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add_1, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow]
triton_poi_fused_add_mean_pow_sub_5.run(buf13, primals_3, buf14, buf15, 16, grid=grid(16), stream=stream0)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_1, u, sub, add_2, sqrt, x_3, mul, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_sqrt_sub_6.run(primals_11, buf13, primals_3, buf14, buf15, primals_12, buf16, 64, grid=grid(64), stream=stream0)
del buf14
del buf15
del primals_12
return (buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
class BertLayerNorm(nn.Module):
def __init__(self, config, variance_epsilon=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(config.hidden_size))
self.beta = nn.Parameter(torch.zeros(config.hidden_size))
self.variance_epsilon = variance_epsilon
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=
4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
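# Usage sketch for the module above (shapes per get_inputs; _mock_config
# builds the config object):
#   cfg = _mock_config(hidden_size=4, num_attention_heads=4,
#                      attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)
#   m = BertAttention(cfg)
#   out = m(torch.rand(4, 4, 4), torch.rand(4, 4, 4))  # -> (4, 4, 4)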
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = float('-inf')
tmp27 = tmp2 == tmp26
tmp28 = tmp27 == 0
tmp29 = tmp28.to(tl.int64)
tmp30 = tmp29 != 0
tmp31 = tmp5 == tmp26
tmp32 = tmp31 == 0
tmp33 = tmp32.to(tl.int64)
tmp34 = tmp33 != 0
tmp35 = tmp30 | tmp34
tmp36 = tmp9 == tmp26
tmp37 = tmp36 == 0
tmp38 = tmp37.to(tl.int64)
tmp39 = tmp38 != 0
tmp40 = tmp35 | tmp39
tmp41 = tmp13 == tmp26
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = tmp43 != 0
tmp45 = tmp40 | tmp44
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
tl.store(out_ptr2 + x2, tmp45, xmask)
@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_out_ptr0 + x4, xmask)
tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp1, tmp10, tmp9)
tl.store(in_out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_mean_pow_sub_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-12
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
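# Note on the kernel above: it finishes the TF-style LayerNorm from
# BertLayerNorm below, computing gamma * (x - mean) / sqrt(var + 1e-12) +
# beta on the residual sum, with the epsilon added inside the square root.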
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf8
del primals_8
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf10, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_mean_pow_sub_5[grid(16)](buf13, primals_3,
buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_sub_6[grid(64)](primals_11,
buf13, primals_3, buf14, buf15, primals_12, buf16, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf14
del buf15
del primals_12
return buf16, primals_3, primals_11, buf9, reinterpret_tensor(buf10, (
16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0
), reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, primals_9
class BertLayerNorm(nn.Module):
def __init__(self, config, variance_epsilon=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(config.hidden_size))
self.beta = nn.Parameter(torch.zeros(config.hidden_size))
self.variance_epsilon = variance_epsilon
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
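# Hedged sanity sketch (not part of the original module): BertLayerNorm
# should agree with torch.nn.functional.layer_norm, since both use the
# biased variance with epsilon added inside the square root. The `_cfg`
# config stub below is a hypothetical stand-in for the real config object.
from types import SimpleNamespace
import torch.nn.functional as F

_cfg = SimpleNamespace(hidden_size=4)
_ln = BertLayerNorm(_cfg)
_x = torch.randn(2, 3, 4)
assert torch.allclose(_ln(_x), F.layer_norm(_x, (4,), _ln.gamma, _ln.beta,
    eps=_ln.variance_epsilon), atol=1e-5)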
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
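# Hedged shape sketch (not part of the original module): with hidden_size=4
# and two heads, transpose_for_scores reshapes (batch, seq, hidden) into
# (batch, heads, seq, head_size). The config stub is hypothetical and uses
# dropout 0 so the check is deterministic.
from types import SimpleNamespace

_attn_cfg = SimpleNamespace(hidden_size=4, num_attention_heads=2,
    attention_probs_dropout_prob=0.0)
_attn = BertSelfAttention(_attn_cfg)
_h = torch.randn(2, 5, 4)
assert _attn.transpose_for_scores(_h).shape == (2, 2, 5, 2)
assert _attn(_h, torch.zeros(2, 1, 1, 5)).shape == (2, 5, 4)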
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttentionNew(nn.Module):
def __init__(self, config):
super(BertAttentionNew, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_0, input_1):
primals_1 = self.self.query.weight
primals_2 = self.self.query.bias
primals_4 = self.self.key.weight
primals_5 = self.self.key.bias
primals_6 = self.self.value.weight
primals_7 = self.self.value.bias
primals_9 = self.output.dense.weight
primals_10 = self.output.dense.bias
primals_11 = self.output.LayerNorm.gamma
primals_12 = self.output.LayerNorm.beta
primals_3 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
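# Hedged usage sketch (not part of the original module, requires CUDA since
# `call` launches compiled Triton kernels): the fused module only accepts
# the fixed (4, 4, 4) shapes asserted in `call`. `_cfg4` is a hypothetical
# config stub.
if torch.cuda.is_available():
    from types import SimpleNamespace
    _cfg4 = SimpleNamespace(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.0, hidden_dropout_prob=0.0)
    _fused = BertAttentionNew(_cfg4).cuda()
    _y = _fused(torch.rand(4, 4, 4, device='cuda'),
        torch.zeros(4, 4, 4, device='cuda'))
    assert _y.shape == (4, 4, 4)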
| BLimmie/pytorch-pretrained-BERT | BertAttention | false | 6,672 | [
"Apache-2.0"
] | 1 | 2ac4b29641e569020ed2acc28016f481f617052b | https://github.com/BLimmie/pytorch-pretrained-BERT/tree/2ac4b29641e569020ed2acc28016f481f617052b | from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
class BertLayerNorm(nn.Module):
def __init__(self, config, variance_epsilon=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super().__init__()
self.gamma = nn.Parameter(torch.ones(config.hidden_size))
self.beta = nn.Parameter(torch.zeros(config.hidden_size))
self.variance_epsilon = variance_epsilon
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.
num_attention_heads)
self.all_head_size = (self.num_attention_heads * self.
attention_head_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1,
-2))
attention_scores = attention_scores / math.sqrt(self.
attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.
all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidde
# ... truncated (>4000 chars) for memory efficiency |
WeightConvNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3u/c3u6zmp5plplv5tjyzlxet5sgcucpeizysbhi7xphxjhdc6kmodq.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
return (reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 4), 0), primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class WeightConvNet(nn.Module):
def __init__(self, in_channels, groups, n_segment):
super(WeightConvNet, self).__init__()
self.lastlayer = nn.Conv1d(in_channels, groups, 3, padding=1)
self.groups = groups
def forward(self, x):
N, _C, T = x.shape
x = self.lastlayer(x)
x = x.view(N, self.groups, T)
x = x.permute(0, 2, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'groups': 1, 'n_segment': 4}]
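# Hedged shape sketch (not part of the original module): the Conv1d maps
# (N, C, T) to (N, groups, T), and the view/permute in forward exposes the
# per-frame weights as (N, T, groups).
_net = WeightConvNet(in_channels=4, groups=1, n_segment=4)
assert _net(torch.rand(4, 4, 4)).shape == (4, 4, 1)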
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
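# Note on the kernel above: the convolution in `call` below runs with
# bias=None, and this kernel completes it by broadcasting the single bias
# scalar (in_ptr0[0]) and adding it in place to all 16 output elements of
# the (4, 1, 4) result.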
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4), (4, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16)](buf1, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 4), 0
), primals_1, primals_2
class WeightConvNetNew(nn.Module):
def __init__(self, in_channels, groups, n_segment):
super(WeightConvNetNew, self).__init__()
self.lastlayer = nn.Conv1d(in_channels, groups, 3, padding=1)
self.groups = groups
def forward(self, input_0):
primals_2 = self.lastlayer.weight
primals_3 = self.lastlayer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| eynaij/X-Temporal_catdim | WeightConvNet | false | 6,673 | [
"MIT"
] | 1 | 6a2efba407c09c83ca061c8467c1373b6ed0c7eb | https://github.com/eynaij/X-Temporal_catdim/tree/6a2efba407c09c83ca061c8467c1373b6ed0c7eb | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_channels, groups, n_segment):
super().__init__()
self.lastlayer = nn.Conv1d(in_channels, groups, 3, padding=1)
self.groups = groups
def forward(self, x):
N, _C, T = x.shape
x = self.lastlayer(x)
x = x.view(N, self.groups, T)
x = x.permute(0, 2, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 1, 4]
|
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/f2/cf2epv27sb2j3mvx2mgbzrr3bnxkm6qxm3ktma6flacbcjoyt2oh.py
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add_1, sum_2, sum_3, union, add_2, dsc, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# dsc => div
# intersection => sum_1
# mul => mul
# mul_1 => mul_1
# sub => sub
# sum_2 => sum_2
# sum_3 => sum_3
# union => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1e-06), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1e-06
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = 1.0
tmp20 = tmp19 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add_1, sum_2, sum_3, union, add_2, dsc, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self, smooth=1e-06):
super(DiceLoss, self).__init__()
self.smooth = smooth
def forward(self, y_pred, y_true):
assert y_pred.size() == y_true.size()
y_pred = y_pred.contiguous().view(y_pred.shape[0], -1)
y_true = y_true.contiguous().view(y_true.shape[0], -1)
intersection = (y_pred * y_true).sum()
union = y_pred.sum() + y_true.sum()
dsc = (2.0 * intersection + self.smooth) / (union + self.smooth)
return 1.0 - dsc
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
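# Hedged numeric sketch (not part of the original module): identical masks
# give a Dice coefficient of ~1, so the loss approaches 0, while disjoint
# masks drive it toward 1.
_m = torch.ones(1, 4)
assert DiceLoss()(_m, _m).item() < 1e-5
assert DiceLoss()(_m, torch.zeros(1, 4)).item() > 0.99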
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1e-06
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = 1.0
tmp20 = tmp19 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(nn.Module):
def __init__(self, smooth=1e-06):
super(DiceLossNew, self).__init__()
self.smooth = smooth
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| fadamsyah/pytorch-brain-mri-segmentation | DiceLoss | false | 6,674 | [
"MIT"
] | 1 | bdb310ecacbddfce2cef20d50cf0b638dd1bc7b1 | https://github.com/fadamsyah/pytorch-brain-mri-segmentation/tree/bdb310ecacbddfce2cef20d50cf0b638dd1bc7b1 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, smooth=1e-06):
super().__init__()
self.smooth = smooth
def forward(self, y_pred, y_true):
assert y_pred.size() == y_true.size()
y_pred = y_pred.contiguous().view(y_pred.shape[0], -1)
y_true = y_true.contiguous().view(y_true.shape[0], -1)
intersection = (y_pred * y_true).sum()
union = y_pred.sum() + y_true.sum()
dsc = (2.0 * intersection + self.smooth) / (union + self.smooth)
return 1.0 - dsc
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CircleLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wo/cwoengrvscqt5xm6wza73dkovcud6nyo3yd56mgv46p7mrciqlzn.py
# Topologically Sorted Source Nodes: [add_2, an, sub_1, mul_2, logit_n, logsumexp, neg, add, add_1, ap, neg_1, sub, mul, logit_p, logsumexp_1, add_3, loss], Original ATen: [aten.add, aten.clamp_min, aten.sub, aten.mul, aten.logsumexp, aten.neg, aten.softplus]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_5
# an => clamp_min_1
# ap => clamp_min
# logit_n => mul_3
# logit_p => mul_1
# logsumexp => abs_1, add_3, amax, eq, exp, full_default, log, sub_2, sum_1, where
# logsumexp_1 => abs_2, add_4, amax_1, eq_1, exp_1, full_default_1, log_1, sub_3, sum_2, where_1
# loss => div, exp_2, gt, log1p, mul_4, where_2
# mul => mul
# mul_2 => mul_2
# neg => neg
# neg_1 => neg_1
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 4), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_2, 0.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, 4), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min_1, %sub_1), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, 4), kwargs = {})
# %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%mul_3, [0], True), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax,), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_1, inf), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %amax), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %where), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0]), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, %squeeze), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 4), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_1, 0.0), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%clamp_min,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, -3), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_1, %sub), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 4), kwargs = {})
# %amax_1 : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%mul_1, [0], True), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax_1,), kwargs = {})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_2, inf), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %amax_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %where_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [0]), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log_1, %squeeze_1), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %add_4), kwargs = {})
# %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 1.0), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul_4, 20.0), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_4,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p, 1.0), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_5, %div), kwargs = {})
triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0 = async_compile.triton('triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp8 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp15 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp22 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp44 = tl.load(in_ptr1 + (x0), xmask)
tmp55 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp65 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp75 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp1 = 4.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 - tmp1
tmp6 = tmp4 * tmp5
tmp7 = tmp6 * tmp1
tmp9 = tmp8 + tmp1
tmp10 = triton_helpers.maximum(tmp9, tmp3)
tmp11 = tmp8 - tmp1
tmp12 = tmp10 * tmp11
tmp13 = tmp12 * tmp1
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp16 = tmp15 + tmp1
tmp17 = triton_helpers.maximum(tmp16, tmp3)
tmp18 = tmp15 - tmp1
tmp19 = tmp17 * tmp18
tmp20 = tmp19 * tmp1
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp23 = tmp22 + tmp1
tmp24 = triton_helpers.maximum(tmp23, tmp3)
tmp25 = tmp22 - tmp1
tmp26 = tmp24 * tmp25
tmp27 = tmp26 * tmp1
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tl_math.abs(tmp28)
tmp30 = float("inf")
tmp31 = tmp29 == tmp30
tmp32 = tl.where(tmp31, tmp3, tmp28)
tmp33 = tmp7 - tmp32
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp13 - tmp32
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp38 = tmp20 - tmp32
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tmp27 - tmp32
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp40 + tmp42
tmp45 = -tmp44
tmp46 = 1.0
tmp47 = tmp45 + tmp46
tmp48 = tmp47 + tmp1
tmp49 = triton_helpers.maximum(tmp48, tmp3)
tmp50 = -tmp49
tmp51 = -3.0
tmp52 = tmp44 - tmp51
tmp53 = tmp50 * tmp52
tmp54 = tmp53 * tmp1
tmp56 = -tmp55
tmp57 = tmp56 + tmp46
tmp58 = tmp57 + tmp1
tmp59 = triton_helpers.maximum(tmp58, tmp3)
tmp60 = -tmp59
tmp61 = tmp55 - tmp51
tmp62 = tmp60 * tmp61
tmp63 = tmp62 * tmp1
tmp64 = triton_helpers.maximum(tmp54, tmp63)
tmp66 = -tmp65
tmp67 = tmp66 + tmp46
tmp68 = tmp67 + tmp1
tmp69 = triton_helpers.maximum(tmp68, tmp3)
tmp70 = -tmp69
tmp71 = tmp65 - tmp51
tmp72 = tmp70 * tmp71
tmp73 = tmp72 * tmp1
tmp74 = triton_helpers.maximum(tmp64, tmp73)
tmp76 = -tmp75
tmp77 = tmp76 + tmp46
tmp78 = tmp77 + tmp1
tmp79 = triton_helpers.maximum(tmp78, tmp3)
tmp80 = -tmp79
tmp81 = tmp75 - tmp51
tmp82 = tmp80 * tmp81
tmp83 = tmp82 * tmp1
tmp84 = triton_helpers.maximum(tmp74, tmp83)
tmp85 = tl_math.abs(tmp84)
tmp86 = tmp85 == tmp30
tmp87 = tl.where(tmp86, tmp3, tmp84)
tmp88 = tmp54 - tmp87
tmp89 = tl_math.exp(tmp88)
tmp90 = tmp63 - tmp87
tmp91 = tl_math.exp(tmp90)
tmp92 = tmp89 + tmp91
tmp93 = tmp73 - tmp87
tmp94 = tl_math.exp(tmp93)
tmp95 = tmp92 + tmp94
tmp96 = tmp83 - tmp87
tmp97 = tl_math.exp(tmp96)
tmp98 = tmp95 + tmp97
tmp99 = tl_math.log(tmp43)
tmp100 = tmp99 + tmp32
tmp101 = tl_math.log(tmp98)
tmp102 = tmp101 + tmp87
tmp103 = tmp100 + tmp102
tmp104 = tmp103 * tmp46
tmp105 = 20.0
tmp106 = tmp104 > tmp105
tmp107 = tl_math.exp(tmp104)
tmp108 = libdevice.log1p(tmp107)
tmp109 = tmp108 * tmp46
tmp110 = tl.where(tmp106, tmp103, tmp109)
tl.store(in_out_ptr0 + (x0), tmp110, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [add_2, an, sub_1, mul_2, logit_n, logsumexp, neg, add, add_1, ap, neg_1, sub, mul, logit_p, logsumexp_1, add_3, loss], Original ATen: [aten.add, aten.clamp_min, aten.sub, aten.mul, aten.logsumexp, aten.neg, aten.softplus]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0.run(buf5, arg1_1, arg0_1, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from torch import nn
class CircleLoss(nn.Module):
def __init__(self, m: 'float', gamma: 'float') ->None:
super(CircleLoss, self).__init__()
self.m = m
self.gamma = gamma
self.soft_plus = nn.Softplus()
def forward(self, sp: 'Tensor', sn: 'Tensor') ->Tensor:
ap = torch.clamp_min(-sp.detach() + 1 + self.m, min=0.0)
an = torch.clamp_min(sn.detach() + self.m, min=0.0)
delta_p = 1 - self.m
delta_n = self.m
logit_p = -ap * (sp - delta_p) * self.gamma
logit_n = an * (sn - delta_n) * self.gamma
loss = self.soft_plus(torch.logsumexp(logit_n, dim=0) + torch.
logsumexp(logit_p, dim=0))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'m': 4, 'gamma': 4}]
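# Hedged usage sketch (not part of the original module): sp holds
# within-class similarities and sn between-class similarities, so
# well-separated pairs (sp near 1, sn near 0) should score a much smaller
# loss than the reverse. The m and gamma values below are illustrative.
_crit = CircleLoss(m=0.25, gamma=80)
_good = _crit(torch.full((8,), 0.9), torch.full((8,), 0.1))
_bad = _crit(torch.full((8,), 0.1), torch.full((8,), 0.9))
assert _good.item() < _bad.item()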
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0(
        in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp8 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp15 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp22 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp44 = tl.load(in_ptr1 + x0, xmask)
tmp55 = tl.load(in_ptr1 + (64 + x0), xmask)
tmp65 = tl.load(in_ptr1 + (128 + x0), xmask)
tmp75 = tl.load(in_ptr1 + (192 + x0), xmask)
tmp1 = 4.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp0 - tmp1
tmp6 = tmp4 * tmp5
tmp7 = tmp6 * tmp1
tmp9 = tmp8 + tmp1
tmp10 = triton_helpers.maximum(tmp9, tmp3)
tmp11 = tmp8 - tmp1
tmp12 = tmp10 * tmp11
tmp13 = tmp12 * tmp1
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp16 = tmp15 + tmp1
tmp17 = triton_helpers.maximum(tmp16, tmp3)
tmp18 = tmp15 - tmp1
tmp19 = tmp17 * tmp18
tmp20 = tmp19 * tmp1
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp23 = tmp22 + tmp1
tmp24 = triton_helpers.maximum(tmp23, tmp3)
tmp25 = tmp22 - tmp1
tmp26 = tmp24 * tmp25
tmp27 = tmp26 * tmp1
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tl_math.abs(tmp28)
tmp30 = float('inf')
tmp31 = tmp29 == tmp30
tmp32 = tl.where(tmp31, tmp3, tmp28)
tmp33 = tmp7 - tmp32
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp13 - tmp32
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp38 = tmp20 - tmp32
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tmp27 - tmp32
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp40 + tmp42
tmp45 = -tmp44
tmp46 = 1.0
tmp47 = tmp45 + tmp46
tmp48 = tmp47 + tmp1
tmp49 = triton_helpers.maximum(tmp48, tmp3)
tmp50 = -tmp49
tmp51 = -3.0
tmp52 = tmp44 - tmp51
tmp53 = tmp50 * tmp52
tmp54 = tmp53 * tmp1
tmp56 = -tmp55
tmp57 = tmp56 + tmp46
tmp58 = tmp57 + tmp1
tmp59 = triton_helpers.maximum(tmp58, tmp3)
tmp60 = -tmp59
tmp61 = tmp55 - tmp51
tmp62 = tmp60 * tmp61
tmp63 = tmp62 * tmp1
tmp64 = triton_helpers.maximum(tmp54, tmp63)
tmp66 = -tmp65
tmp67 = tmp66 + tmp46
tmp68 = tmp67 + tmp1
tmp69 = triton_helpers.maximum(tmp68, tmp3)
tmp70 = -tmp69
tmp71 = tmp65 - tmp51
tmp72 = tmp70 * tmp71
tmp73 = tmp72 * tmp1
tmp74 = triton_helpers.maximum(tmp64, tmp73)
tmp76 = -tmp75
tmp77 = tmp76 + tmp46
tmp78 = tmp77 + tmp1
tmp79 = triton_helpers.maximum(tmp78, tmp3)
tmp80 = -tmp79
tmp81 = tmp75 - tmp51
tmp82 = tmp80 * tmp81
tmp83 = tmp82 * tmp1
tmp84 = triton_helpers.maximum(tmp74, tmp83)
tmp85 = tl_math.abs(tmp84)
tmp86 = tmp85 == tmp30
tmp87 = tl.where(tmp86, tmp3, tmp84)
tmp88 = tmp54 - tmp87
tmp89 = tl_math.exp(tmp88)
tmp90 = tmp63 - tmp87
tmp91 = tl_math.exp(tmp90)
tmp92 = tmp89 + tmp91
tmp93 = tmp73 - tmp87
tmp94 = tl_math.exp(tmp93)
tmp95 = tmp92 + tmp94
tmp96 = tmp83 - tmp87
tmp97 = tl_math.exp(tmp96)
tmp98 = tmp95 + tmp97
tmp99 = tl_math.log(tmp43)
tmp100 = tmp99 + tmp32
tmp101 = tl_math.log(tmp98)
tmp102 = tmp101 + tmp87
tmp103 = tmp100 + tmp102
tmp104 = tmp103 * tmp46
tmp105 = 20.0
tmp106 = tmp104 > tmp105
tmp107 = tl_math.exp(tmp104)
tmp108 = libdevice.log1p(tmp107)
tmp109 = tmp108 * tmp46
tmp110 = tl.where(tmp106, tmp103, tmp109)
tl.store(in_out_ptr0 + x0, tmp110, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = buf2
del buf2
get_raw_stream(0)
triton_poi_fused_add_clamp_min_logsumexp_mul_neg_softplus_sub_0[grid
(64)](buf5, arg1_1, arg0_1, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del arg0_1
del arg1_1
return buf5,
class CircleLossNew(nn.Module):
def __init__(self, m: 'float', gamma: 'float') ->None:
super(CircleLossNew, self).__init__()
self.m = m
self.gamma = gamma
self.soft_plus = nn.Softplus()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| fabiozappo/Person_reID_tensorrt | CircleLoss | false | 6,675 | [
"Apache-2.0"
] | 1 | 164441f35777698274e7664a9aefcc8d54467dc3 | https://github.com/fabiozappo/Person_reID_tensorrt/tree/164441f35777698274e7664a9aefcc8d54467dc3 | import torch
from torch import Tensor
from torch import nn
class Model(nn.Module):
def __init__(self, m: 'float', gamma: 'float') ->None:
super().__init__()
self.m = m
self.gamma = gamma
self.soft_plus = nn.Softplus()
def forward(self, sp: 'Tensor', sn: 'Tensor') ->Tensor:
ap = torch.clamp_min(-sp.detach() + 1 + self.m, min=0.0)
an = torch.clamp_min(sn.detach() + self.m, min=0.0)
delta_p = 1 - self.m
delta_n = self.m
logit_p = -ap * (sp - delta_p) * self.gamma
logit_n = an * (sn - delta_n) * self.gamma
loss = self.soft_plus(torch.logsumexp(logit_n, dim=0) + torch.
logsumexp(logit_p, dim=0))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Aggregation | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zz/czzshumc33uf6556bhjicm54dfoveb3cgwxv5nwymam34beaz547.py
# Topologically Sorted Source Nodes: [add, mean, sub], Original ATen: [aten.add, aten.mean, aten.sub]
# Source node to ATen node mapping:
# add => add
# mean => mean
# sub => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
triton_poi_fused_add_mean_sub_0 = async_compile.triton('triton_poi_fused_add_mean_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp3 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = 4.0
tmp11 = tmp9 / tmp10
tmp12 = tmp2 - tmp11
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, mean, sub], Original ATen: [aten.add, aten.mean, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import *
class Aggregation(nn.Module):
"""
Aggregation layer for the Dueling architecture.
https://arxiv.org/abs/1511.06581
This layer computes a Q function by combining
an estimate of V with an estimate of the advantage.
    The advantage is normalized by subtracting the average
    advantage so that we can properly identify the value estimate.
"""
def forward(self, value, advantages):
return value + advantages - torch.mean(advantages, dim=1, keepdim=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
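# Hedged numeric sketch (not part of the original module): subtracting the
# mean advantage makes the aggregated Q-values average back to the value
# estimate along dim 1, which is what keeps V identifiable in the dueling
# head.
_v = torch.zeros(2, 3)
_adv = torch.randn(2, 3)
_q = Aggregation()(_v, _adv)
assert torch.allclose(_q.mean(dim=1), _v.mean(dim=1), atol=1e-6)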
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.nn import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mean_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp3 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp9 = tmp7 + tmp8
tmp10 = 4.0
tmp11 = tmp9 / tmp10
tmp12 = tmp2 - tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_sub_0[grid(256)](arg0_1, arg1_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class AggregationNew(nn.Module):
"""
Aggregation layer for the Dueling architecture.
https://arxiv.org/abs/1511.06581
This layer computes a Q function by combining
an estimate of V with an estimate of the advantage.
    The advantage is normalized by subtracting the average
    advantage so that we can properly separate the value and advantage
    estimates.
"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ezelikman/autonomous-learning-library | Aggregation | false | 6,676 | [
"MIT"
] | 1 | b32d059ca8b191afe0b310102d0754796f391aff | https://github.com/ezelikman/autonomous-learning-library/tree/b32d059ca8b191afe0b310102d0754796f391aff | import torch
from torch import nn
from torch.nn import *
class Model(nn.Module):
"""
Aggregation layer for the Dueling architecture.
https://arxiv.org/abs/1511.06581
This layer computes a Q function by combining
an estimate of V with an estimate of the advantage.
    The advantage is normalized by subtracting the average
    advantage so that we can properly separate the value and advantage
    estimates.
"""
def forward(self, value, advantages):
return value + advantages - torch.mean(advantages, dim=1, keepdim=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Entropy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ez/cezlxaqqtuq7qxg4wz6eew3jk443fdmth5yevubilnqutpg2ltwr.py
# Topologically Sorted Source Nodes: [log, plogp, setitem], Original ATen: [aten.log, aten.mul, aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# log => log
# plogp => mul
# setitem => full_default, index_put
# Graph fragment:
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %log), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%mul, [%ne], %full_default), kwargs = {})
triton_poi_fused_index_put_lift_fresh_log_mul_0 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_log_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_log_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_log_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl_math.log(tmp0)
tmp2 = tmp0 * tmp1
tmp3 = tmp2 != tmp2
tmp4 = 0.0
tmp5 = tl.where(tmp3, tmp4, tmp2)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/n5/cn5h6zqjn7scgjmxd4suhuixqlsejzanicbodm4lnafmdwo27s7u.py
# Topologically Sorted Source Nodes: [sum_1, neg], Original ATen: [aten.sum, aten.neg]
# Source node to ATen node mapping:
# neg => neg
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%index_put, [-1]), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {})
triton_poi_fused_neg_sum_1 = async_compile.triton('triton_poi_fused_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_neg_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_neg_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = -tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log, plogp, setitem], Original ATen: [aten.log, aten.mul, aten.lift_fresh, aten.index_put]
stream0 = get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_log_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, neg], Original ATen: [aten.sum, aten.neg]
triton_poi_fused_neg_sum_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Entropy(nn.Module):
def __init__(self):
super(Entropy, self).__init__()
def forward(self, x):
plogp = x * torch.log(x)
plogp[plogp != plogp] = 0
return -torch.sum(plogp, dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
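# A quick sanity check (added for illustration; a sketch, not from the
# original repo). Masking `plogp != plogp` zeroes the NaNs from 0 * log(0),
# so a one-hot distribution gets entropy 0 and a uniform one gets log(n).
if __name__ == '__main__':
    import math
    ent = Entropy()
    assert torch.allclose(ent(torch.tensor([[1.0, 0.0, 0.0, 0.0]])),
        torch.zeros(1))
    assert torch.allclose(ent(torch.full((1, 4), 0.25)), torch.tensor([
        math.log(4.0)]))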
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_put_lift_fresh_log_mul_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.log(tmp0)
tmp2 = tmp0 * tmp1
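    # tmp2 != tmp2 is true exactly where 0 * log(0) produced NaN; tl.where
    # replaces those lanes with 0.0, mirroring plogp[plogp != plogp] = 0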
tmp3 = tmp2 != tmp2
tmp4 = 0.0
tmp5 = tl.where(tmp3, tmp4, tmp2)
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_neg_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
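    # contiguous reduction over the last dim of size 4:
    # H = -(p0*log p0 + p1*log p1 + p2*log p2 + p3*log p3)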
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = -tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_log_mul_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_neg_sum_1[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
return buf1,
class EntropyNew(nn.Module):
def __init__(self):
super(EntropyNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| fallcat/synst | Entropy | false | 6,677 | [
"BSD-3-Clause"
] | 1 | 0fa4adffa825af4a62b6e739b59c4125a7b6698e | https://github.com/fallcat/synst/tree/0fa4adffa825af4a62b6e739b59c4125a7b6698e | import torch
from torch import nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
plogp = x * torch.log(x)
plogp[plogp != plogp] = 0
return -torch.sum(plogp, dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
QuaternionLinear | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ho/choh64wmlh7hsuuqsanwxxdyhyh7ew67epm3l5imkc6atn5yxoef.py
# Topologically Sorted Source Nodes: [cat_kernels_4_quaternion], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_kernels_4_quaternion => cat_4
# Graph fragment:
# %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%cat, %cat_1, %cat_2, %cat_3], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp9 = tl.load(in_ptr0 + (0))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp16 = tl.load(in_ptr1 + (0))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp26 = tl.load(in_ptr2 + (0))
tmp27 = tl.broadcast_to(tmp26, [XBLOCK])
tmp35 = tl.load(in_ptr3 + (0))
tmp36 = tl.broadcast_to(tmp35, [XBLOCK])
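    # x0 indexes the output column block and x1 the row block of the 4x4
    # Hamilton-product kernel; the predicate cascade below routes the
    # signed scalars +/-{r, i, j, k} into the matching cell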
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp7 & tmp4
tmp11 = tmp5 >= tmp3
tmp12 = tl.full([1], 2, tl.int64)
tmp13 = tmp5 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tmp14 & tmp4
tmp18 = -tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp15, tmp18, tmp19)
tmp21 = tmp5 >= tmp12
tmp22 = tl.full([1], 3, tl.int64)
tmp23 = tmp5 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tmp24 & tmp4
tmp28 = -tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp25, tmp28, tmp29)
tmp31 = tmp5 >= tmp22
tmp32 = tl.full([1], 4, tl.int64)
tmp33 = tmp5 < tmp32
tmp34 = tmp31 & tmp4
tmp37 = -tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp34, tmp37, tmp38)
tmp40 = tl.where(tmp24, tmp30, tmp39)
tmp41 = tl.where(tmp14, tmp20, tmp40)
tmp42 = tl.where(tmp7, tmp10, tmp41)
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp4, tmp42, tmp43)
tmp45 = tmp0 >= tmp3
tmp46 = tmp0 < tmp12
tmp47 = tmp45 & tmp46
tmp48 = tmp7 & tmp47
tmp49 = tmp14 & tmp47
tmp50 = tmp24 & tmp47
tmp51 = tl.where(tmp50, tmp37, tmp38)
tmp52 = tmp31 & tmp47
tmp53 = tl.where(tmp24, tmp51, tmp27)
tmp54 = tl.where(tmp14, tmp10, tmp53)
tmp55 = tl.where(tmp7, tmp17, tmp54)
tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype)
tmp57 = tl.where(tmp47, tmp55, tmp56)
tmp58 = tmp0 >= tmp12
tmp59 = tmp0 < tmp22
tmp60 = tmp58 & tmp59
tmp61 = tmp7 & tmp60
tmp62 = tmp14 & tmp60
tmp63 = tmp24 & tmp60
tmp64 = tmp31 & tmp60
tmp65 = tl.where(tmp64, tmp18, tmp19)
tmp66 = tl.where(tmp24, tmp10, tmp65)
tmp67 = tl.where(tmp14, tmp36, tmp66)
tmp68 = tl.where(tmp7, tmp27, tmp67)
tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype)
tmp70 = tl.where(tmp60, tmp68, tmp69)
tmp71 = tmp0 >= tmp22
tmp72 = tmp0 < tmp32
tmp73 = tmp7 & tmp71
tmp74 = tmp14 & tmp71
tmp75 = tl.where(tmp74, tmp28, tmp29)
tmp76 = tmp24 & tmp71
tmp77 = tmp31 & tmp71
tmp78 = tl.where(tmp24, tmp17, tmp10)
tmp79 = tl.where(tmp14, tmp75, tmp78)
tmp80 = tl.where(tmp7, tmp36, tmp79)
tmp81 = tl.full(tmp80.shape, 0.0, tmp80.dtype)
tmp82 = tl.where(tmp71, tmp80, tmp81)
tmp83 = tl.where(tmp60, tmp70, tmp82)
tmp84 = tl.where(tmp47, tmp57, tmp83)
tmp85 = tl.where(tmp4, tmp44, tmp84)
tl.store(out_ptr0 + (x2), tmp85, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (1, 1), (1, 1))
assert_size_stride(arg2_1, (1, 1), (1, 1))
assert_size_stride(arg3_1, (1, 1), (1, 1))
assert_size_stride(arg4_1, (1, 1), (1, 1))
assert_size_stride(arg5_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_kernels_4_quaternion], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg4_1, arg1_1, arg2_1, arg3_1, buf0, 16, grid=grid(16), stream=stream0)
del arg1_1
del arg2_1
del arg3_1
del arg4_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [addmm], Original ATen: [aten.addmm]
extern_kernels.addmm(arg5_1, arg0_1, buf0, alpha=1, beta=1, out=buf1)
del arg0_1
del arg5_1
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
arg5_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import numpy as np
from numpy.random import RandomState
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from scipy.stats import chi
import torch.fx
def quaternion_init(in_features, out_features, rng, kernel_size=None,
criterion='glorot'):
if kernel_size is not None:
receptive_field = np.prod(kernel_size)
fan_in = in_features * receptive_field
fan_out = out_features * receptive_field
else:
fan_in = in_features
fan_out = out_features
if criterion == 'glorot':
s = 1.0 / np.sqrt(2 * (fan_in + fan_out))
elif criterion == 'he':
s = 1.0 / np.sqrt(2 * fan_in)
else:
raise ValueError('Invalid criterion: ' + criterion)
rng = RandomState(np.random.randint(1, 1234))
if kernel_size is None:
kernel_shape = in_features, out_features
elif type(kernel_size) is int:
kernel_shape = (out_features, in_features) + tuple((kernel_size,))
else:
kernel_shape = (out_features, in_features) + (*kernel_size,)
modulus = chi.rvs(4, loc=0, scale=s, size=kernel_shape)
number_of_weights = np.prod(kernel_shape)
v_i = np.random.uniform(-1.0, 1.0, number_of_weights)
v_j = np.random.uniform(-1.0, 1.0, number_of_weights)
v_k = np.random.uniform(-1.0, 1.0, number_of_weights)
for i in range(0, number_of_weights):
norm = np.sqrt(v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2 + 0.0001)
v_i[i] /= norm
v_j[i] /= norm
v_k[i] /= norm
v_i = v_i.reshape(kernel_shape)
v_j = v_j.reshape(kernel_shape)
v_k = v_k.reshape(kernel_shape)
phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)
weight_r = modulus * np.cos(phase)
weight_i = modulus * v_i * np.sin(phase)
weight_j = modulus * v_j * np.sin(phase)
weight_k = modulus * v_k * np.sin(phase)
return weight_r, weight_i, weight_j, weight_k
def unitary_init(in_features, out_features, rng, kernel_size=None,
criterion='he'):
if kernel_size is not None:
receptive_field = np.prod(kernel_size)
in_features * receptive_field
out_features * receptive_field
else:
pass
if kernel_size is None:
kernel_shape = in_features, out_features
elif type(kernel_size) is int:
kernel_shape = (out_features, in_features) + tuple((kernel_size,))
else:
kernel_shape = (out_features, in_features) + (*kernel_size,)
number_of_weights = np.prod(kernel_shape)
v_r = np.random.uniform(-1.0, 1.0, number_of_weights)
v_i = np.random.uniform(-1.0, 1.0, number_of_weights)
v_j = np.random.uniform(-1.0, 1.0, number_of_weights)
v_k = np.random.uniform(-1.0, 1.0, number_of_weights)
for i in range(0, number_of_weights):
norm = np.sqrt(v_r[i] ** 2 + v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2
) + 0.0001
v_r[i] /= norm
v_i[i] /= norm
v_j[i] /= norm
v_k[i] /= norm
v_r = v_r.reshape(kernel_shape)
v_i = v_i.reshape(kernel_shape)
v_j = v_j.reshape(kernel_shape)
v_k = v_k.reshape(kernel_shape)
return v_r, v_i, v_j, v_k
def affect_init(r_weight, i_weight, j_weight, k_weight, init_func, rng,
init_criterion):
if r_weight.size() != i_weight.size() or r_weight.size() != j_weight.size(
) or r_weight.size() != k_weight.size():
raise ValueError(
            'The real and imaginary weights should have the same size. Found: r:'
+ str(r_weight.size()) + ' i:' + str(i_weight.size()) + ' j:' +
str(j_weight.size()) + ' k:' + str(k_weight.size()))
elif r_weight.dim() != 2:
raise Exception(
'affect_init accepts only matrices. Found dimension = ' + str(
r_weight.dim()))
kernel_size = None
r, i, j, k = init_func(r_weight.size(0), r_weight.size(1), rng,
kernel_size, init_criterion)
r, i, j, k = torch.from_numpy(r), torch.from_numpy(i), torch.from_numpy(j
), torch.from_numpy(k)
r_weight.data = r.type_as(r_weight.data)
i_weight.data = i.type_as(i_weight.data)
j_weight.data = j.type_as(j_weight.data)
k_weight.data = k.type_as(k_weight.data)
def check_input(input):
if input.dim() not in {2, 3, 4, 5}:
raise RuntimeError(
            'Quaternion linear accepts only input of dimension 2 or 3. Quaternion conv accepts up to 5 dimensions. input.dim = '
+ str(input.dim()))
if input.dim() < 4:
nb_hidden = input.size()[-1]
else:
nb_hidden = input.size()[1]
if nb_hidden % 4 != 0:
raise RuntimeError(
'Quaternion Tensors must be divisible by 4. input.size()[1] = ' +
str(nb_hidden))
def get_i(input):
if input.dim() < 4:
nb_hidden = input.size()[-1]
else:
nb_hidden = input.size()[1]
if input.dim() == 2:
return input.narrow(1, nb_hidden // 4, nb_hidden // 4)
if input.dim() == 3:
return input.narrow(2, nb_hidden // 4, nb_hidden // 4)
if input.dim() >= 4:
return input.narrow(1, nb_hidden // 4, nb_hidden // 4)
def get_j(input):
check_input(input)
if input.dim() < 4:
nb_hidden = input.size()[-1]
else:
nb_hidden = input.size()[1]
if input.dim() == 2:
return input.narrow(1, nb_hidden // 2, nb_hidden // 4)
if input.dim() == 3:
return input.narrow(2, nb_hidden // 2, nb_hidden // 4)
if input.dim() >= 4:
return input.narrow(1, nb_hidden // 2, nb_hidden // 4)
def get_k(input):
check_input(input)
if input.dim() < 4:
nb_hidden = input.size()[-1]
else:
nb_hidden = input.size()[1]
if input.dim() == 2:
return input.narrow(1, nb_hidden - nb_hidden // 4, nb_hidden // 4)
if input.dim() == 3:
return input.narrow(2, nb_hidden - nb_hidden // 4, nb_hidden // 4)
if input.dim() >= 4:
return input.narrow(1, nb_hidden - nb_hidden // 4, nb_hidden // 4)
def get_r(input):
check_input(input)
if input.dim() < 4:
nb_hidden = input.size()[-1]
else:
nb_hidden = input.size()[1]
if input.dim() == 2:
return input.narrow(1, 0, nb_hidden // 4)
if input.dim() == 3:
return input.narrow(2, 0, nb_hidden // 4)
if input.dim() >= 4:
return input.narrow(1, 0, nb_hidden // 4)
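# Layout note (added): quaternion tensors pack r, i, j, k as contiguous
# quarters of the hidden axis, so for hidden size 8 get_r/get_i/get_j/get_k
# return columns [0:2], [2:4], [4:6] and [6:8] respectively.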
class QuaternionLinearFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, r_weight, i_weight, j_weight, k_weight, bias=None):
ctx.save_for_backward(input, r_weight, i_weight, j_weight, k_weight,
bias)
check_input(input)
cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -
k_weight], dim=0)
cat_kernels_4_i = torch.cat([i_weight, r_weight, -k_weight,
j_weight], dim=0)
cat_kernels_4_j = torch.cat([j_weight, k_weight, r_weight, -
i_weight], dim=0)
cat_kernels_4_k = torch.cat([k_weight, -j_weight, i_weight,
r_weight], dim=0)
cat_kernels_4_quaternion = torch.cat([cat_kernels_4_r,
cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=1)
if input.dim() == 2:
if bias is not None:
return torch.addmm(bias, input, cat_kernels_4_quaternion)
else:
return torch.mm(input, cat_kernels_4_quaternion)
else:
output = torch.matmul(input, cat_kernels_4_quaternion)
if bias is not None:
return output + bias
else:
return output
@staticmethod
def backward(ctx, grad_output):
input, r_weight, i_weight, j_weight, k_weight, _bias = (ctx.
saved_tensors)
(grad_input) = (grad_weight_r) = (grad_weight_i) = (grad_weight_j) = (
grad_weight_k) = (grad_bias) = None
input_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=0)
input_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=0)
input_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=0)
input_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=0)
cat_kernels_4_quaternion_T = Variable(torch.cat([input_r, input_i,
input_j, input_k], dim=1).permute(1, 0), requires_grad=False)
r = get_r(input)
i = get_i(input)
j = get_j(input)
k = get_k(input)
input_r = torch.cat([r, -i, -j, -k], dim=0)
input_i = torch.cat([i, r, -k, j], dim=0)
input_j = torch.cat([j, k, r, -i], dim=0)
input_k = torch.cat([k, -j, i, r], dim=0)
input_mat = Variable(torch.cat([input_r, input_i, input_j, input_k],
dim=1), requires_grad=False)
r = get_r(grad_output)
i = get_i(grad_output)
j = get_j(grad_output)
k = get_k(grad_output)
input_r = torch.cat([r, i, j, k], dim=1)
input_i = torch.cat([-i, r, k, -j], dim=1)
input_j = torch.cat([-j, -k, r, i], dim=1)
input_k = torch.cat([-k, j, -i, r], dim=1)
grad_mat = torch.cat([input_r, input_i, input_j, input_k], dim=0)
if ctx.needs_input_grad[0]:
grad_input = grad_output.mm(cat_kernels_4_quaternion_T)
if ctx.needs_input_grad[1]:
grad_weight = grad_mat.permute(1, 0).mm(input_mat).permute(1, 0)
unit_size_x = r_weight.size(0)
unit_size_y = r_weight.size(1)
grad_weight_r = grad_weight.narrow(0, 0, unit_size_x).narrow(1,
0, unit_size_y)
grad_weight_i = grad_weight.narrow(0, 0, unit_size_x).narrow(1,
unit_size_y, unit_size_y)
grad_weight_j = grad_weight.narrow(0, 0, unit_size_x).narrow(1,
unit_size_y * 2, unit_size_y)
grad_weight_k = grad_weight.narrow(0, 0, unit_size_x).narrow(1,
unit_size_y * 3, unit_size_y)
if ctx.needs_input_grad[5]:
grad_bias = grad_output.sum(0).squeeze(0)
return (grad_input, grad_weight_r, grad_weight_i, grad_weight_j,
grad_weight_k, grad_bias)
class QuaternionLinear(Module):
"""Applies a quaternion linear transformation to the incoming data.
"""
def __init__(self, in_features, out_features, bias=True, init_criterion
='he', weight_init='quaternion', seed=None):
super(QuaternionLinear, self).__init__()
self.in_features = in_features // 4
self.out_features = out_features // 4
self.r_weight = Parameter(torch.Tensor(self.in_features, self.
out_features))
self.i_weight = Parameter(torch.Tensor(self.in_features, self.
out_features))
self.j_weight = Parameter(torch.Tensor(self.in_features, self.
out_features))
self.k_weight = Parameter(torch.Tensor(self.in_features, self.
out_features))
if bias:
self.bias = Parameter(torch.Tensor(self.out_features * 4))
else:
self.register_parameter('bias', None)
self.init_criterion = init_criterion
self.weight_init = weight_init
self.seed = seed if seed is not None else np.random.randint(0, 1234)
self.rng = RandomState(self.seed)
self.reset_parameters()
def reset_parameters(self):
winit = {'quaternion': quaternion_init, 'unitary': unitary_init}[self
.weight_init]
if self.bias is not None:
self.bias.data.fill_(0)
affect_init(self.r_weight, self.i_weight, self.j_weight, self.
k_weight, winit, self.rng, self.init_criterion)
def forward(self, input):
if input.dim() == 3:
T, N, C = input.size()
input = input.view(T * N, C)
output = QuaternionLinearFunction.apply(input, self.r_weight,
self.i_weight, self.j_weight, self.k_weight, self.bias)
output = output.view(T, N, output.size(1))
elif input.dim() == 2:
output = QuaternionLinearFunction.apply(input, self.r_weight,
self.i_weight, self.j_weight, self.k_weight, self.bias)
else:
raise NotImplementedError
return output
def __repr__(self):
return self.__class__.__name__ + '(' + 'in_features=' + str(self.
in_features) + ', out_features=' + str(self.out_features
) + ', bias=' + str(self.bias is not None
) + ', init_criterion=' + str(self.init_criterion
) + ', weight_init=' + str(self.weight_init) + ', seed=' + str(self
.seed) + ')'
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
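# Worked check (added for illustration; hypothetical usage, not from the
# original repo). With in_features = out_features = 4 every weight is a
# 1x1 scalar, and the concatenated 4x4 kernel applies the Hamilton product
# of the weight quaternion w = (r, i, j, k) with the input quaternion,
# which we can verify against the textbook formula.
if __name__ == '__main__':
    def hamilton(w, x):
        wr, wi, wj, wk = w
        xr, xi, xj, xk = x
        return torch.stack([
            wr * xr - wi * xi - wj * xj - wk * xk,
            wr * xi + wi * xr + wj * xk - wk * xj,
            wr * xj - wi * xk + wj * xr + wk * xi,
            wr * xk + wi * xj - wj * xi + wk * xr])
    layer = QuaternionLinear(4, 4, bias=False)
    x = torch.rand(1, 4)
    w = [p.view(()) for p in (layer.r_weight, layer.i_weight,
        layer.j_weight, layer.k_weight)]
    assert torch.allclose(layer(x)[0], hamilton(w, [x[0, n] for n in
        range(4)]), atol=1e-06)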
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import numpy as np
from numpy.random import RandomState
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from scipy.stats import chi
import torch.fx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp9 = tl.load(in_ptr0 + 0)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp16 = tl.load(in_ptr1 + 0)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp26 = tl.load(in_ptr2 + 0)
tmp27 = tl.broadcast_to(tmp26, [XBLOCK])
tmp35 = tl.load(in_ptr3 + 0)
tmp36 = tl.broadcast_to(tmp35, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = x1
tmp7 = tmp5 < tmp3
tmp7 & tmp4
tmp11 = tmp5 >= tmp3
tmp12 = tl.full([1], 2, tl.int64)
tmp13 = tmp5 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tmp14 & tmp4
tmp18 = -tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp15, tmp18, tmp19)
tmp21 = tmp5 >= tmp12
tmp22 = tl.full([1], 3, tl.int64)
tmp23 = tmp5 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tmp24 & tmp4
tmp28 = -tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp25, tmp28, tmp29)
tmp31 = tmp5 >= tmp22
tl.full([1], 4, tl.int64)
tmp34 = tmp31 & tmp4
tmp37 = -tmp36
tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype)
tmp39 = tl.where(tmp34, tmp37, tmp38)
tmp40 = tl.where(tmp24, tmp30, tmp39)
tmp41 = tl.where(tmp14, tmp20, tmp40)
tmp42 = tl.where(tmp7, tmp10, tmp41)
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp4, tmp42, tmp43)
tmp45 = tmp0 >= tmp3
tmp46 = tmp0 < tmp12
tmp47 = tmp45 & tmp46
tmp7 & tmp47
tmp14 & tmp47
tmp50 = tmp24 & tmp47
tmp51 = tl.where(tmp50, tmp37, tmp38)
tmp31 & tmp47
tmp53 = tl.where(tmp24, tmp51, tmp27)
tmp54 = tl.where(tmp14, tmp10, tmp53)
tmp55 = tl.where(tmp7, tmp17, tmp54)
tmp56 = tl.full(tmp55.shape, 0.0, tmp55.dtype)
tmp57 = tl.where(tmp47, tmp55, tmp56)
tmp58 = tmp0 >= tmp12
tmp59 = tmp0 < tmp22
tmp60 = tmp58 & tmp59
tmp7 & tmp60
tmp14 & tmp60
tmp24 & tmp60
tmp64 = tmp31 & tmp60
tmp65 = tl.where(tmp64, tmp18, tmp19)
tmp66 = tl.where(tmp24, tmp10, tmp65)
tmp67 = tl.where(tmp14, tmp36, tmp66)
tmp68 = tl.where(tmp7, tmp27, tmp67)
tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype)
tmp70 = tl.where(tmp60, tmp68, tmp69)
tmp71 = tmp0 >= tmp22
tmp7 & tmp71
tmp74 = tmp14 & tmp71
tmp75 = tl.where(tmp74, tmp28, tmp29)
tmp24 & tmp71
tmp31 & tmp71
tmp78 = tl.where(tmp24, tmp17, tmp10)
tmp79 = tl.where(tmp14, tmp75, tmp78)
tmp80 = tl.where(tmp7, tmp36, tmp79)
tmp81 = tl.full(tmp80.shape, 0.0, tmp80.dtype)
tmp82 = tl.where(tmp71, tmp80, tmp81)
tmp83 = tl.where(tmp60, tmp70, tmp82)
tmp84 = tl.where(tmp47, tmp57, tmp83)
tmp85 = tl.where(tmp4, tmp44, tmp84)
tl.store(out_ptr0 + x2, tmp85, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (1, 1), (1, 1))
assert_size_stride(arg2_1, (1, 1), (1, 1))
assert_size_stride(arg3_1, (1, 1), (1, 1))
assert_size_stride(arg4_1, (1, 1), (1, 1))
assert_size_stride(arg5_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(16)](arg4_1, arg1_1, arg2_1, arg3_1,
buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg1_1
del arg2_1
del arg3_1
del arg4_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(arg5_1, arg0_1, buf0, alpha=1, beta=1, out=buf1)
del arg0_1
del arg5_1
del buf0
return buf1,
def quaternion_init(in_features, out_features, rng, kernel_size=None,
criterion='glorot'):
if kernel_size is not None:
receptive_field = np.prod(kernel_size)
fan_in = in_features * receptive_field
fan_out = out_features * receptive_field
else:
fan_in = in_features
fan_out = out_features
if criterion == 'glorot':
s = 1.0 / np.sqrt(2 * (fan_in + fan_out))
elif criterion == 'he':
s = 1.0 / np.sqrt(2 * fan_in)
else:
raise ValueError('Invalid criterion: ' + criterion)
rng = RandomState(np.random.randint(1, 1234))
if kernel_size is None:
kernel_shape = in_features, out_features
elif type(kernel_size) is int:
kernel_shape = (out_features, in_features) + tuple((kernel_size,))
else:
kernel_shape = (out_features, in_features) + (*kernel_size,)
modulus = chi.rvs(4, loc=0, scale=s, size=kernel_shape)
number_of_weights = np.prod(kernel_shape)
v_i = np.random.uniform(-1.0, 1.0, number_of_weights)
v_j = np.random.uniform(-1.0, 1.0, number_of_weights)
v_k = np.random.uniform(-1.0, 1.0, number_of_weights)
for i in range(0, number_of_weights):
norm = np.sqrt(v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2 + 0.0001)
v_i[i] /= norm
v_j[i] /= norm
v_k[i] /= norm
v_i = v_i.reshape(kernel_shape)
v_j = v_j.reshape(kernel_shape)
v_k = v_k.reshape(kernel_shape)
phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)
weight_r = modulus * np.cos(phase)
weight_i = modulus * v_i * np.sin(phase)
weight_j = modulus * v_j * np.sin(phase)
weight_k = modulus * v_k * np.sin(phase)
return weight_r, weight_i, weight_j, weight_k
def unitary_init(in_features, out_features, rng, kernel_size=None,
criterion='he'):
if kernel_size is not None:
receptive_field = np.prod(kernel_size)
in_features * receptive_field
out_features * receptive_field
else:
pass
if kernel_size is None:
kernel_shape = in_features, out_features
elif type(kernel_size) is int:
kernel_shape = (out_features, in_features) + tuple((kernel_size,))
else:
kernel_shape = (out_features, in_features) + (*kernel_size,)
number_of_weights = np.prod(kernel_shape)
v_r = np.random.uniform(-1.0, 1.0, number_of_weights)
v_i = np.random.uniform(-1.0, 1.0, number_of_weights)
v_j = np.random.uniform(-1.0, 1.0, number_of_weights)
v_k = np.random.uniform(-1.0, 1.0, number_of_weights)
for i in range(0, number_of_weights):
norm = np.sqrt(v_r[i] ** 2 + v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2
) + 0.0001
v_r[i] /= norm
v_i[i] /= norm
v_j[i] /= norm
v_k[i] /= norm
v_r = v_r.reshape(kernel_shape)
v_i = v_i.reshape(kernel_shape)
v_j = v_j.reshape(kernel_shape)
v_k = v_k.reshape(kernel_shape)
return v_r, v_i, v_j, v_k
def affect_init(r_weight, i_weight, j_weight, k_weight, init_func, rng,
init_criterion):
if r_weight.size() != i_weight.size() or r_weight.size() != j_weight.size(
) or r_weight.size() != k_weight.size():
raise ValueError(
            'The real and imaginary weights should have the same size. Found: r:'
+ str(r_weight.size()) + ' i:' + str(i_weight.size()) + ' j:' +
str(j_weight.size()) + ' k:' + str(k_weight.size()))
elif r_weight.dim() != 2:
raise Exception(
'affect_init accepts only matrices. Found dimension = ' + str(
r_weight.dim()))
kernel_size = None
r, i, j, k = init_func(r_weight.size(0), r_weight.size(1), rng,
kernel_size, init_criterion)
r, i, j, k = torch.from_numpy(r), torch.from_numpy(i), torch.from_numpy(j
), torch.from_numpy(k)
r_weight.data = r.type_as(r_weight.data)
i_weight.data = i.type_as(i_weight.data)
j_weight.data = j.type_as(j_weight.data)
k_weight.data = k.type_as(k_weight.data)
def check_input(input):
if input.dim() not in {2, 3, 4, 5}:
raise RuntimeError(
            'Quaternion linear accepts only input of dimension 2 or 3. Quaternion conv accepts up to 5 dimensions. input.dim = '
+ str(input.dim()))
if input.dim() < 4:
nb_hidden = input.size()[-1]
else:
nb_hidden = input.size()[1]
if nb_hidden % 4 != 0:
raise RuntimeError(
'Quaternion Tensors must be divisible by 4. input.size()[1] = ' +
str(nb_hidden))
def get_i(input):
if input.dim() < 4:
nb_hidden = input.size()[-1]
else:
nb_hidden = input.size()[1]
if input.dim() == 2:
return input.narrow(1, nb_hidden // 4, nb_hidden // 4)
if input.dim() == 3:
return input.narrow(2, nb_hidden // 4, nb_hidden // 4)
if input.dim() >= 4:
return input.narrow(1, nb_hidden // 4, nb_hidden // 4)
def get_j(input):
check_input(input)
if input.dim() < 4:
nb_hidden = input.size()[-1]
else:
nb_hidden = input.size()[1]
if input.dim() == 2:
return input.narrow(1, nb_hidden // 2, nb_hidden // 4)
if input.dim() == 3:
return input.narrow(2, nb_hidden // 2, nb_hidden // 4)
if input.dim() >= 4:
return input.narrow(1, nb_hidden // 2, nb_hidden // 4)
def get_k(input):
check_input(input)
if input.dim() < 4:
nb_hidden = input.size()[-1]
else:
nb_hidden = input.size()[1]
if input.dim() == 2:
return input.narrow(1, nb_hidden - nb_hidden // 4, nb_hidden // 4)
if input.dim() == 3:
return input.narrow(2, nb_hidden - nb_hidden // 4, nb_hidden // 4)
if input.dim() >= 4:
return input.narrow(1, nb_hidden - nb_hidden // 4, nb_hidden // 4)
def get_r(input):
check_input(input)
if input.dim() < 4:
nb_hidden = input.size()[-1]
else:
nb_hidden = input.size()[1]
if input.dim() == 2:
return input.narrow(1, 0, nb_hidden // 4)
if input.dim() == 3:
return input.narrow(2, 0, nb_hidden // 4)
if input.dim() >= 4:
return input.narrow(1, 0, nb_hidden // 4)
class QuaternionLinearFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, r_weight, i_weight, j_weight, k_weight, bias=None):
ctx.save_for_backward(input, r_weight, i_weight, j_weight, k_weight,
bias)
check_input(input)
cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -
k_weight], dim=0)
cat_kernels_4_i = torch.cat([i_weight, r_weight, -k_weight,
j_weight], dim=0)
cat_kernels_4_j = torch.cat([j_weight, k_weight, r_weight, -
i_weight], dim=0)
cat_kernels_4_k = torch.cat([k_weight, -j_weight, i_weight,
r_weight], dim=0)
cat_kernels_4_quaternion = torch.cat([cat_kernels_4_r,
cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=1)
if input.dim() == 2:
if bias is not None:
return torch.addmm(bias, input, cat_kernels_4_quaternion)
else:
return torch.mm(input, cat_kernels_4_quaternion)
else:
output = torch.matmul(input, cat_kernels_4_quaternion)
if bias is not None:
return output + bias
else:
return output
@staticmethod
def backward(ctx, grad_output):
input, r_weight, i_weight, j_weight, k_weight, _bias = (ctx.
saved_tensors)
(grad_input) = (grad_weight_r) = (grad_weight_i) = (grad_weight_j) = (
grad_weight_k) = (grad_bias) = None
input_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=0)
input_i = torch.cat([i_weight, r_weight, -k_weight, j_weight], dim=0)
input_j = torch.cat([j_weight, k_weight, r_weight, -i_weight], dim=0)
input_k = torch.cat([k_weight, -j_weight, i_weight, r_weight], dim=0)
cat_kernels_4_quaternion_T = Variable(torch.cat([input_r, input_i,
input_j, input_k], dim=1).permute(1, 0), requires_grad=False)
r = get_r(input)
i = get_i(input)
j = get_j(input)
k = get_k(input)
input_r = torch.cat([r, -i, -j, -k], dim=0)
input_i = torch.cat([i, r, -k, j], dim=0)
input_j = torch.cat([j, k, r, -i], dim=0)
input_k = torch.cat([k, -j, i, r], dim=0)
input_mat = Variable(torch.cat([input_r, input_i, input_j, input_k],
dim=1), requires_grad=False)
r = get_r(grad_output)
i = get_i(grad_output)
j = get_j(grad_output)
k = get_k(grad_output)
input_r = torch.cat([r, i, j, k], dim=1)
input_i = torch.cat([-i, r, k, -j], dim=1)
input_j = torch.cat([-j, -k, r, i], dim=1)
input_k = torch.cat([-k, j, -i, r], dim=1)
grad_mat = torch.cat([input_r, input_i, input_j, input_k], dim=0)
if ctx.needs_input_grad[0]:
grad_input = grad_output.mm(cat_kernels_4_quaternion_T)
if ctx.needs_input_grad[1]:
grad_weight = grad_mat.permute(1, 0).mm(input_mat).permute(1, 0)
unit_size_x = r_weight.size(0)
unit_size_y = r_weight.size(1)
grad_weight_r = grad_weight.narrow(0, 0, unit_size_x).narrow(1,
0, unit_size_y)
grad_weight_i = grad_weight.narrow(0, 0, unit_size_x).narrow(1,
unit_size_y, unit_size_y)
grad_weight_j = grad_weight.narrow(0, 0, unit_size_x).narrow(1,
unit_size_y * 2, unit_size_y)
grad_weight_k = grad_weight.narrow(0, 0, unit_size_x).narrow(1,
unit_size_y * 3, unit_size_y)
if ctx.needs_input_grad[5]:
grad_bias = grad_output.sum(0).squeeze(0)
return (grad_input, grad_weight_r, grad_weight_i, grad_weight_j,
grad_weight_k, grad_bias)
class QuaternionLinearNew(Module):
"""Applies a quaternion linear transformation to the incoming data.
"""
def __init__(self, in_features, out_features, bias=True, init_criterion
='he', weight_init='quaternion', seed=None):
super(QuaternionLinearNew, self).__init__()
self.in_features = in_features // 4
self.out_features = out_features // 4
self.r_weight = Parameter(torch.Tensor(self.in_features, self.
out_features))
self.i_weight = Parameter(torch.Tensor(self.in_features, self.
out_features))
self.j_weight = Parameter(torch.Tensor(self.in_features, self.
out_features))
self.k_weight = Parameter(torch.Tensor(self.in_features, self.
out_features))
if bias:
self.bias = Parameter(torch.Tensor(self.out_features * 4))
else:
self.register_parameter('bias', None)
self.init_criterion = init_criterion
self.weight_init = weight_init
self.seed = seed if seed is not None else np.random.randint(0, 1234)
self.rng = RandomState(self.seed)
self.reset_parameters()
def reset_parameters(self):
winit = {'quaternion': quaternion_init, 'unitary': unitary_init}[self
.weight_init]
if self.bias is not None:
self.bias.data.fill_(0)
affect_init(self.r_weight, self.i_weight, self.j_weight, self.
k_weight, winit, self.rng, self.init_criterion)
def __repr__(self):
return self.__class__.__name__ + '(' + 'in_features=' + str(self.
in_features) + ', out_features=' + str(self.out_features
) + ', bias=' + str(self.bias is not None
) + ', init_criterion=' + str(self.init_criterion
) + ', weight_init=' + str(self.weight_init) + ', seed=' + str(self
.seed) + ')'
def forward(self, input_0):
arg1_1 = self.r_weight
arg2_1 = self.i_weight
arg3_1 = self.j_weight
arg4_1 = self.k_weight
arg5_1 = self.bias
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
return output[0]
| eleGAN23/HI2I | QuaternionLinear | false | 6,678 | [
"MIT"
] | 1 | 7730ee0963614290099b011c113048ef6d1b149c | https://github.com/eleGAN23/HI2I/tree/7730ee0963614290099b011c113048ef6d1b149c | from torch.nn import Module
import torch
import numpy as np
from numpy.random import RandomState
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from scipy.stats import chi
import torch.fx
def quaternion_init(in_features, out_features, rng, kernel_size=None,
criterion='glorot'):
if kernel_size is not None:
receptive_field = np.prod(kernel_size)
fan_in = in_features * receptive_field
fan_out = out_features * receptive_field
else:
fan_in = in_features
fan_out = out_features
if criterion == 'glorot':
s = 1.0 / np.sqrt(2 * (fan_in + fan_out))
elif criterion == 'he':
s = 1.0 / np.sqrt(2 * fan_in)
else:
raise ValueError('Invalid criterion: ' + criterion)
rng = RandomState(np.random.randint(1, 1234))
if kernel_size is None:
kernel_shape = in_features, out_features
elif type(kernel_size) is int:
kernel_shape = (out_features, in_features) + tuple((kernel_size,))
else:
kernel_shape = (out_features, in_features) + (*kernel_size,)
modulus = chi.rvs(4, loc=0, scale=s, size=kernel_shape)
number_of_weights = np.prod(kernel_shape)
v_i = np.random.uniform(-1.0, 1.0, number_of_weights)
v_j = np.random.uniform(-1.0, 1.0, number_of_weights)
v_k = np.random.uniform(-1.0, 1.0, number_of_weights)
for i in range(0, number_of_weights):
norm = np.sqrt(v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2 + 0.0001)
v_i[i] /= norm
v_j[i] /= norm
v_k[i] /= norm
v_i = v_i.reshape(kernel_shape)
v_j = v_j.reshape(kernel_shape)
v_k = v_k.reshape(kernel_shape)
phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)
weight_r = modulus * np.cos(phase)
weight_i = modulus * v_i * np.sin(phase)
weight_j = modulus * v_j * np.sin(phase)
weight_k = modulus * v_k * np.sin(phase)
return weight_r, weight_i, weight_j, weight_k
def unitary_init(in_features, out_features, rng, kernel_size=None,
criterion='he'):
if kernel_size is not None:
receptive_field = np.prod(kernel_size)
in_features * receptive_field
out_features * receptive_field
else:
pass
if kernel_size is None:
kernel_shape = in_features, out_features
elif type(kernel_size) is int:
kernel_shape = (out_features, in_features) + tuple((kernel_size,))
else:
kernel_shape = (out_features, in_features) + (*kernel_size,)
number_of_weights = np.prod(kernel_shape)
v_r = np.random.uniform(-1.0, 1.0, number_of_weights)
v_i = np.random.uniform(-1.0, 1.0, number_of_weights)
v_j = np.random.uniform(-1.0, 1.0, number_of_weights)
v_k = np.random.uniform(-1.0, 1.0, number_of_weights)
for i in range(0, number_of_weights):
norm = np.sqrt(v_r[i] ** 2 + v_i[i] ** 2 + v_j[i] ** 2 + v_k[i] ** 2
) + 0.0001
v_r[i] /= norm
v_i[i] /= norm
v_j[i] /= norm
v_k[i] /= norm
v_r = v_r.reshape(kernel_shape)
v_i = v_i.reshape(kernel_shape)
v_j = v_j.reshape(kernel_shape)
v_k = v_k.reshape(kernel_shape)
return v_r, v_i, v_j, v_k
def affect_init(r_weight, i_weight, j_weight, k_weight, init_func, rng,
init_criterion):
if r_weight.size() != i_weight.size() or r_weight.size() != j_weight.size(
) or r_weight.size() != k_weight.size():
raise ValueError(
            'The real and imaginary weights should have the same size. Found: r:'
+ str(r_weight.size()) + ' i:' + str(i_weight.size()) + ' j:' +
str(j_weight.size()) + ' k:' + str(k_weight.size()))
elif r_weight.dim() != 2:
raise Exception(
'affect_init accepts only matrices. Found dimension = ' + str(
r_weight.dim()))
kernel_size = None
r, i, j, k = init_func(r_weight.size(0), r_weight.size(1), rng,
kernel_size, init_criterion)
r, i, j, k = torch.from_numpy(r), torch.from_numpy(i), torc
# ... truncated (>4000 chars) for memory efficiency |
MultiHeadedAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3u/c3ui6pflkqqmwiicu3k3k6nxfn3zxrzgar4nyb7sxfkreg6ab7we.py
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_weights => div, exp, sum_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 0.5), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
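    # numerically stable softmax: subtract the per-row max, then apply the
    # attention scale (0.5 == projection_dim ** -0.5, folded in here by the
    # compiler) before exponentiating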
tmp7 = tmp2 - tmp6
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tmp10 / tmp14
tl.store(out_ptr2 + (r1 + (16*x0)), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2)
del primals_2
buf3 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 16), (64, 1, 4), 0), out=buf3)
buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_per_fused__softmax_0.run(buf3, buf6, 64, 16, grid=grid(64), stream=stream0)
del buf3
buf7 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attended], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf8)
return (reinterpret_tensor(buf8, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf6, (1, 4, 16, 16), (256, 256, 16, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf6, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), primals_5, reinterpret_tensor(buf0, (4, 4, 16), (64, 1, 4), 0), reinterpret_tensor(buf2, (4, 4, 16), (64, 1, 4), 0), reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
def same_tensor(tensor, *args):
""" Do the input tensors all point to the same underlying data """
for other in args:
if not torch.is_tensor(other):
return False
if tensor.device != other.device:
return False
if tensor.dtype != other.dtype:
return False
if tensor.data_ptr() != other.data_ptr():
return False
return True
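# Note (added for clarity): same_tensor returns True only when every argument
# shares the first tensor's device, dtype and storage data pointer; e.g. a
# tensor and its .clone() compare False even though their values are equal.
# forward() below uses this to pick the fused self-attention projection path.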
class MultiHeadedAttention(nn.Module):
""" Implement a multi-headed attention module """
def __init__(self, embed_dim, num_heads=1):
""" Initialize the attention module """
super(MultiHeadedAttention, self).__init__()
assert embed_dim % num_heads == 0, f'num_heads={num_heads} should evenly divide embed_dim={embed_dim}'
self.embed_dim = embed_dim
self.num_heads = num_heads
self.projection_dim = embed_dim // num_heads
self.scale = self.projection_dim ** -0.5
self.input_weights = nn.Parameter(torch.Tensor(3 * embed_dim,
embed_dim))
self.output_projection = nn.Linear(embed_dim, embed_dim, bias=False)
self.reset_parameters()
def reset_parameters(self):
""" Reset parameters using xavier initialization """
gain = nn.init.calculate_gain('linear')
nn.init.xavier_uniform_(self.input_weights, gain)
nn.init.xavier_uniform_(self.output_projection.weight, gain)
def project(self, inputs, index=0, chunks=1):
""" Produce a linear projection using the weights """
batch_size = inputs.shape[0]
start = index * self.embed_dim
end = start + chunks * self.embed_dim
projections = F.linear(inputs, self.input_weights[start:end]).chunk(
chunks, dim=-1)
output_projections = []
for projection in projections:
output_projections.append(projection.view(batch_size, -1, self.
num_heads, self.projection_dim).transpose(2, 1).contiguous(
).view(batch_size * self.num_heads, -1, self.projection_dim))
return output_projections
def attention(self, values, keys, queries, key_mask=None, mask=None):
""" Scaled dot product attention with optional masks """
logits = self.scale * torch.bmm(queries, keys.transpose(2, 1))
if mask is not None:
logits += mask
if key_mask is not None:
logits_shape = logits.shape
batch_size = logits_shape[0] // self.num_heads
logits = logits.view(batch_size, self.num_heads, logits_shape[1
], logits_shape[2])
logits.masked_fill_(key_mask[:, None, None], float('-inf'))
logits = logits.view(logits_shape)
attn_weights = F.softmax(logits, dim=-1)
attended = torch.bmm(attn_weights, values)
batch_size = queries.shape[0] // self.num_heads
return attended.view(batch_size, self.num_heads, -1, self.
projection_dim).transpose(2, 1).contiguous().view(batch_size, -
1, self.num_heads * self.projection_dim), attn_weights.view(
batch_size, self.num_heads, attn_weights.size()[1], -1).transpose(
0, 1).contiguous()
def forward(self, values, keys, queries, key_mask=None, attention_mask=
None, num_queries=0):
""" Forward pass of the attention """
if same_tensor(values, keys, queries):
values, keys, queries = self.project(values, chunks=3)
elif same_tensor(values, keys):
values, keys = self.project(values, chunks=2)
queries, = self.project(queries, 2)
else:
values, = self.project(values, 0)
keys, = self.project(keys, 1)
queries, = self.project(queries, 2)
if num_queries:
queries = queries[:, -num_queries:]
attended, attn_weights = self.attention(values, keys, queries,
key_mask, attention_mask)
return self.output_projection(attended), attn_weights
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4}]
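# A usage sketch (added; shapes and sizes here are illustrative assumptions):
# when values, keys and queries alias the same tensor, forward() takes the
# fused self-attention path in project(); distinct tensors take separate
# projections, as in encoder-decoder (cross) attention.
if __name__ == "__main__":
    attn = MultiHeadedAttention(embed_dim=8, num_heads=2)
    x = torch.rand(2, 5, 8)  # (batch, time, embed_dim)
    out, weights = attn(x, x, x)  # self-attention: one fused projection
    mem = torch.rand(2, 7, 8)
    out, weights = attn(mem, mem, x)  # cross-attention: values == keys
    print(out.shape)  # torch.Size([2, 5, 8])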
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tmp10 / tmp14
tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2)
del primals_2
buf3 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 16, 4), (64, 4, 1),
0), reinterpret_tensor(buf1, (4, 4, 16), (64, 1, 4), 0), out=buf3)
buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(64)](buf3, buf6, 64, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf3
buf7 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf0, (4, 16, 4), (64,
4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf8)
return reinterpret_tensor(buf8, (4, 16, 4), (64, 4, 1), 0
), reinterpret_tensor(buf6, (1, 4, 16, 16), (256, 256, 16, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), buf6, reinterpret_tensor(buf7, (64, 4), (4, 1), 0
), primals_5, reinterpret_tensor(buf0, (4, 4, 16), (64, 1, 4), 0
), reinterpret_tensor(buf2, (4, 4, 16), (64, 1, 4), 0
), reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0)
def same_tensor(tensor, *args):
""" Do the input tensors all point to the same underlying data """
for other in args:
if not torch.is_tensor(other):
return False
if tensor.device != other.device:
return False
if tensor.dtype != other.dtype:
return False
if tensor.data_ptr() != other.data_ptr():
return False
return True
class MultiHeadedAttentionNew(nn.Module):
""" Implement a multi-headed attention module """
def __init__(self, embed_dim, num_heads=1):
""" Initialize the attention module """
super(MultiHeadedAttentionNew, self).__init__()
assert embed_dim % num_heads == 0, f'num_heads={num_heads} should evenly divide embed_dim={embed_dim}'
self.embed_dim = embed_dim
self.num_heads = num_heads
self.projection_dim = embed_dim // num_heads
self.scale = self.projection_dim ** -0.5
self.input_weights = nn.Parameter(torch.Tensor(3 * embed_dim,
embed_dim))
self.output_projection = nn.Linear(embed_dim, embed_dim, bias=False)
self.reset_parameters()
def reset_parameters(self):
""" Reset parameters using xavier initialization """
gain = nn.init.calculate_gain('linear')
nn.init.xavier_uniform_(self.input_weights, gain)
nn.init.xavier_uniform_(self.output_projection.weight, gain)
def project(self, inputs, index=0, chunks=1):
""" Produce a linear projection using the weights """
batch_size = inputs.shape[0]
start = index * self.embed_dim
end = start + chunks * self.embed_dim
projections = F.linear(inputs, self.input_weights[start:end]).chunk(
chunks, dim=-1)
output_projections = []
for projection in projections:
output_projections.append(projection.view(batch_size, -1, self.
num_heads, self.projection_dim).transpose(2, 1).contiguous(
).view(batch_size * self.num_heads, -1, self.projection_dim))
return output_projections
def attention(self, values, keys, queries, key_mask=None, mask=None):
""" Scaled dot product attention with optional masks """
logits = self.scale * torch.bmm(queries, keys.transpose(2, 1))
if mask is not None:
logits += mask
if key_mask is not None:
logits_shape = logits.shape
batch_size = logits_shape[0] // self.num_heads
logits = logits.view(batch_size, self.num_heads, logits_shape[1
], logits_shape[2])
logits.masked_fill_(key_mask[:, None, None], float('-inf'))
logits = logits.view(logits_shape)
attn_weights = F.softmax(logits, dim=-1)
attended = torch.bmm(attn_weights, values)
batch_size = queries.shape[0] // self.num_heads
return attended.view(batch_size, self.num_heads, -1, self.
projection_dim).transpose(2, 1).contiguous().view(batch_size, -
1, self.num_heads * self.projection_dim), attn_weights.view(
batch_size, self.num_heads, attn_weights.size()[1], -1).transpose(
0, 1).contiguous()
def forward(self, input_0, input_1, input_2):
primals_2 = self.input_weights
primals_5 = self.output_projection.weight
primals_1 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
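# A minimal smoke-test sketch (added; not part of the generated module): it
# assumes a CUDA device and the fixed (4, 4, 4, 4) input shapes that call()
# asserts via assert_size_stride.
if __name__ == "__main__":
    if torch.cuda.is_available():
        attn = MultiHeadedAttentionNew(embed_dim=4).cuda()
        v, k, q = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
        attended, attn_weights = attn(v, k, q)
        print(attended.shape, attn_weights.shape)  # (4, 16, 4) and (1, 4, 16, 16)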
| fallcat/synst | MultiHeadedAttention | false | 6,679 | [
"BSD-3-Clause"
] | 1 | 0fa4adffa825af4a62b6e739b59c4125a7b6698e | https://github.com/fallcat/synst/tree/0fa4adffa825af4a62b6e739b59c4125a7b6698e | import torch
from torch import nn
from torch.nn import functional as F
def same_tensor(tensor, *args):
""" Do the input tensors all point to the same underlying data """
for other in args:
if not torch.is_tensor(other):
return False
if tensor.device != other.device:
return False
if tensor.dtype != other.dtype:
return False
if tensor.data_ptr() != other.data_ptr():
return False
return True
class Model(nn.Module):
""" Implement a multi-headed attention module """
def __init__(self, embed_dim, num_heads=1):
""" Initialize the attention module """
super().__init__()
assert embed_dim % num_heads == 0, f'num_heads={num_heads} should evenly divide embed_dim={embed_dim}'
self.embed_dim = embed_dim
self.num_heads = num_heads
self.projection_dim = embed_dim // num_heads
self.scale = self.projection_dim ** -0.5
self.input_weights = nn.Parameter(torch.Tensor(3 * embed_dim,
embed_dim))
self.output_projection = nn.Linear(embed_dim, embed_dim, bias=False)
self.reset_parameters()
def reset_parameters(self):
""" Reset parameters using xavier initialization """
gain = nn.init.calculate_gain('linear')
nn.init.xavier_uniform_(self.input_weights, gain)
nn.init.xavier_uniform_(self.output_projection.weight, gain)
def project(self, inputs, index=0, chunks=1):
""" Produce a linear projection using the weights """
batch_size = inputs.shape[0]
start = index * self.embed_dim
end = start + chunks * self.embed_dim
projections = F.linear(inputs, self.input_weights[start:end]).chunk(
chunks, dim=-1)
output_projections = []
for projection in projections:
output_projections.append(projection.view(batch_size, -1, self.
num_heads, self.projection_dim).transpose(2, 1).contiguous(
).view(batch_size * self.num_heads, -1, self.projection_dim))
return output_projections
def attention(self, values, keys, queries, key_mask=None, mask=None):
""" Scaled dot product attention with optional masks """
logits = self.scale * torch.bmm(queries, keys.transpose(2, 1))
if mask is not None:
logits += mask
if key_mask is not None:
logits_shape = logits.shape
batch_size = logits_shape[0] // self.num_heads
logits = logits.view(batch_size, self.num_heads, logits_shape[1
], logits_shape[2])
logits.masked_fill_(key_mask[:, None, None], float('-inf'))
logits = logits.view(logits_shape)
attn_weights = F.softmax(logits, dim=-1)
attended = torch.bmm(attn_weights, values)
batch_size = queries.shape[0] // self.num_heads
return attended.view(batch_size, self.num_heads, -1, self.
projection_dim).transpose(2, 1).contiguous().view(batch_size, -
1, self.num_heads * self.projection_dim), attn_weights.view(
batch_size, self.num_heads, attn_weights.size()[1], -1).transpose(
0, 1).contiguous()
def forward(self, values, keys, queries, key_mask=None, attention_mask=
None, num_queries=0):
""" Forward pass of the attention """
if same_tensor(values, keys, queries):
values, keys, queries = self.project(values, chunks=3)
elif same_tensor(values, keys):
values, keys = self.project(values, chunks=2)
queries, = self.project(queries, 2)
else:
values, = self.project(values, 0)
keys, = self.project(keys, 1)
queries, = self.project(queries, 2)
if num_queries:
queries = queries[:, -num_queries:]
attended, attn_weights = self.attention(values, keys, queries,
key_mask, attention_mask)
return self.outp
# ... truncated (>4000 chars) for memory efficiency |
MultiLayeredConv1d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zt/cztfdbfdeuswkfmqcigzocsq5mos7eqthkdqr2u3uktw4kuq7d5w.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 3)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
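# Note (added for clarity): the kernel above fuses the conv1d bias add with the
# ReLU and also stores the boolean (activation <= 0) mask that autograd's
# threshold_backward pass consumes, avoiding a separate backward-prep kernel.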
# kernel path: runs/run_shard_4/inductor_cache/mh/cmhvpi42ibq2v7tidubg2uoo7emx6l6p4w5dmszccsffhaiuewi7.py
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %primals_4, %primals_5, [1], [1], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 3), (12, 3, 1))
buf1 = reinterpret_tensor(buf0, (4, 3), (3, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 3), (3, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf4, 12, grid=grid(12), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 3), (0, 3, 1), 0), primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf2, (1, 4, 2), (8, 2, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 8, grid=grid(8), stream=stream0)
del primals_5
return (reinterpret_tensor(buf3, (4, 2), (2, 1), 0), primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 3), (12, 3, 1), 0), buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
class MultiLayeredConv1d(torch.nn.Module):
"""Multi-layered conv1d for Transformer block.
    This is a module of multi-layered conv1d designed to replace the position-wise feed-forward network
    in a Transformer block, as introduced in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
.. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
https://arxiv.org/pdf/1905.09263.pdf
"""
def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
"""Initialize MultiLayeredConv1d module.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
"""
super(MultiLayeredConv1d, self).__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Conv1d(hidden_chans, in_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Batch of input tensors (B, *, in_chans).
Returns:
            Tensor: Batch of output tensors (B, *, in_chans).
"""
x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_chans': 4, 'hidden_chans': 4, 'kernel_size': 4,
'dropout_rate': 0.5}]
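# A usage sketch (added; the batch of 2, 8 time steps and 16 hidden channels
# are illustrative assumptions). forward expects (B, *, in_chans) and moves the
# channel axis via transpose; an odd kernel_size preserves the time dimension.
if __name__ == "__main__":
    ffn = MultiLayeredConv1d(in_chans=4, hidden_chans=16, kernel_size=3,
        dropout_rate=0.1)
    x = torch.rand(2, 8, 4)  # (batch, time, channels)
    y = ffn(x)  # conv1d -> ReLU -> dropout -> conv1d
    print(y.shape)  # torch.Size([2, 8, 4])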
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 3
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 3), (12, 3, 1))
buf1 = reinterpret_tensor(buf0, (4, 3), (3, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 3), (3, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(12)](buf1,
primals_3, buf4, 12, XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 3
), (0, 3, 1), 0), primals_4, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf2, (1, 4, 2), (8, 2, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(8)](buf3, primals_5, 8, XBLOCK=
8, num_warps=1, num_stages=1)
del primals_5
return reinterpret_tensor(buf3, (4, 2), (2, 1), 0
), primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4), (
16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 3), (12, 3, 1), 0), buf4
class MultiLayeredConv1dNew(torch.nn.Module):
"""Multi-layered conv1d for Transformer block.
    This is a module of multi-layered conv1d designed to replace the position-wise feed-forward network
    in a Transformer block, as introduced in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
.. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
https://arxiv.org/pdf/1905.09263.pdf
"""
def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
"""Initialize MultiLayeredConv1d module.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
"""
super(MultiLayeredConv1dNew, self).__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Conv1d(hidden_chans, in_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, input_0):
primals_2 = self.w_1.weight
primals_3 = self.w_1.bias
primals_4 = self.w_2.weight
primals_5 = self.w_2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
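# A minimal smoke-test sketch (added): assumes CUDA and the (4, 4) input shape
# asserted inside call(); the compiled path runs both convs via extern kernels
# and fuses bias + ReLU in triton_poi_fused_relu_threshold_backward_0.
if __name__ == "__main__":
    if torch.cuda.is_available():
        m = MultiLayeredConv1dNew(4, 4, 4, 0.5).cuda()
        print(m(torch.rand(4, 4, device='cuda')).shape)  # torch.Size([4, 2])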
| fancyliumeng/asv-subtools | MultiLayeredConv1d | false | 6,680 | [
"Apache-2.0"
] | 1 | 56a13484472e7ae6eb00d762c00d57e581e78eb4 | https://github.com/fancyliumeng/asv-subtools/tree/56a13484472e7ae6eb00d762c00d57e581e78eb4 | import torch
import torch.nn
class Model(torch.nn.Module):
"""Multi-layered conv1d for Transformer block.
    This is a module of multi-layered conv1d designed to replace the position-wise feed-forward network
    in a Transformer block, as introduced in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
.. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
https://arxiv.org/pdf/1905.09263.pdf
"""
def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
"""Initialize MultiLayeredConv1d module.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
"""
super().__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Conv1d(hidden_chans, in_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Batch of input tensors (B, *, in_chans).
Returns:
            Tensor: Batch of output tensors (B, *, in_chans).
"""
x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_chans': 4, 'hidden_chans': 4, 'kernel_size': 4,
'dropout_rate': 0.5}]
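# Shape note (added sketch of the conv arithmetic assumed above): with an even
# kernel_size=4 and padding=(4 - 1)//2 = 1, each conv gives
# L_out = L + 2*1 - 4 + 1 = L - 1, so the sequence shrinks by one frame per
# layer; a 2-D input is treated by Conv1d as an unbatched (C, L) pair.
if __name__ == "__main__":
    m = Model(in_chans=4, hidden_chans=4, kernel_size=4, dropout_rate=0.5)
    print(m(torch.rand(4, 4)).shape)  # torch.Size([4, 2]) after two convs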
|
LDEPooling | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pp/cpp3e5hyuefsytb2bxsptaz76d4zz65bilbl56ymzdohlu4cbq56.py
# Topologically Sorted Source Nodes: [r, pow_1, sum_1, mul, w], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul, aten._softmax]
# Source node to ATen node mapping:
# mul => mul
# pow_1 => pow_1
# r => sub
# sum_1 => sum_1
# w => amax, exp, sub_1, sum_2
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %primals_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2], True), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %sum_1), kwargs = {})
# %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%mul, [3], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [3], True), kwargs = {})
triton_per_fused__softmax_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused__softmax_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_mul_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_mul_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = (xindex // 4)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (r2), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (r2), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (64 + r2), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (128 + r2), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (192 + r2), None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp7 = tmp5 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tmp4 + tmp8
tmp12 = tmp10 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp15 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp14 + tmp18
tmp20 = tmp0 * tmp19
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.where(xmask, tmp21, float("-inf"))
tmp24 = triton_helpers.max2(tmp23, 1)[:, None]
tmp25 = tmp20 - tmp24
tmp26 = tl_math.exp(tmp25)
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.where(xmask, tmp27, 0)
tmp30 = tl.sum(tmp29, 1)[:, None]
tl.store(out_ptr0 + (r2 + (64*x3)), tmp20, xmask)
tl.store(out_ptr1 + (x3), tmp24, xmask)
tl.store(out_ptr2 + (x3), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5c/c5cyg6tmgybx2znb56tli3mxu474a5mhwo2sfz4cyzeil7dyv77v.py
# Topologically Sorted Source Nodes: [r, w, mul_1, e], Original ATen: [aten.sub, aten._softmax, aten.mul, aten.mean]
# Source node to ATen node mapping:
# e => mean
# mul_1 => mul_1
# r => sub
# w => div, exp, sub_1
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %primals_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sub), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul_1, [1]), kwargs = {})
triton_poi_fused__softmax_mean_mul_sub_1 = async_compile.triton('triton_poi_fused__softmax_mean_mul_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mean_mul_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mean_mul_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = (xindex // 256)
x4 = (xindex // 64)
x3 = xindex % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (4*x2), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (4*x4), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x3), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (64 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr3 + (1 + (4*x4)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (128 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr3 + (2 + (4*x4)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (192 + x0 + (256*x2)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr3 + (3 + (4*x4)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp5 * tmp8
tmp12 = tmp10 - tmp11
tmp13 = tl_math.exp(tmp12)
tmp15 = tmp13 / tmp14
tmp17 = tmp16 - tmp7
tmp18 = tmp15 * tmp17
tmp19 = tmp9 + tmp18
tmp22 = tmp20 - tmp21
tmp23 = tl_math.exp(tmp22)
tmp25 = tmp23 / tmp24
tmp27 = tmp26 - tmp7
tmp28 = tmp25 * tmp27
tmp29 = tmp19 + tmp28
tmp32 = tmp30 - tmp31
tmp33 = tl_math.exp(tmp32)
tmp35 = tmp33 / tmp34
tmp37 = tmp36 - tmp7
tmp38 = tmp35 * tmp37
tmp39 = tmp29 + tmp38
tmp40 = 4.0
tmp41 = tmp39 / tmp40
tl.store(out_ptr0 + (x5), tmp41, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 64), (64, 1))
assert_size_stride(primals_3, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 64), (256, 64, 1024, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [r, pow_1, sum_1, mul, w], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul, aten._softmax]
stream0 = get_raw_stream(0)
triton_per_fused__softmax_mul_pow_sub_sum_0.run(primals_3, primals_1, primals_2, buf0, buf1, buf2, 16, 64, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 64), (256, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [r, w, mul_1, e], Original ATen: [aten.sub, aten._softmax, aten.mul, aten.mean]
triton_poi_fused__softmax_mean_mul_sub_1.run(buf0, buf1, buf2, primals_1, primals_2, buf3, 1024, grid=grid(1024), stream=stream0)
del buf0
return (reinterpret_tensor(buf3, (4, 256, 1), (256, 1, 1), 0), primals_1, primals_2, primals_3, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
class LDEPooling(torch.nn.Module):
"""A novel learnable dictionary encoding layer according to [Weicheng Cai, etc., "A NOVEL LEARNABLE
DICTIONARY ENCODING LAYER FOR END-TO-END LANGUAGE IDENTIFICATION", icassp, 2018]"""
def __init__(self, input_dim, c_num=64):
super(LDEPooling, self).__init__()
self.input_dim = input_dim
self.output_dim = input_dim * c_num
self.mu = torch.nn.Parameter(torch.randn(input_dim, c_num))
self.s = torch.nn.Parameter(torch.ones(c_num))
self.softmax_for_w = torch.nn.Softmax(dim=3)
def forward(self, inputs):
"""
        @inputs: a 3-dimensional tensor (one batch) with axes [sample-index, feature-dim-index, frame-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
r = inputs.transpose(1, 2).unsqueeze(3) - self.mu
w = self.softmax_for_w(self.s * torch.sum(r ** 2, dim=2, keepdim=True))
e = torch.mean(w * r, dim=1)
return e.reshape(-1, self.output_dim, 1)
def get_output_dim(self):
return self.output_dim
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
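# The encoding above, written out (added sketch of the layer's computation):
#   r[t, c] = x[t] - mu[:, c]                    residual to dictionary center c
#   w[t, c] = softmax_c(s[c] * ||r[t, c]||^2)    soft assignment over centers
#   e[c]    = mean_t(w[t, c] * r[t, c])          aggregated residual per center
# A usage sketch (batch 2, 4-dim features and 10 frames are assumed shapes):
if __name__ == "__main__":
    pool = LDEPooling(input_dim=4, c_num=64)
    frames = torch.rand(2, 4, 10)  # (samples, feature-dim, frames)
    print(pool(frames).shape)  # torch.Size([2, 256, 1]) == (B, 4 * 64, 1)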
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_mul_pow_sub_sum_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + r2, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr2 + (64 + r2), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr2 + (128 + r2), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr2 + (192 + r2), None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp4 = tmp3 * tmp3
tmp7 = tmp5 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tmp4 + tmp8
tmp12 = tmp10 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tmp9 + tmp13
tmp17 = tmp15 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp14 + tmp18
tmp20 = tmp0 * tmp19
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.where(xmask, tmp21, float('-inf'))
tmp24 = triton_helpers.max2(tmp23, 1)[:, None]
tmp25 = tmp20 - tmp24
tmp26 = tl_math.exp(tmp25)
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.where(xmask, tmp27, 0)
tmp30 = tl.sum(tmp29, 1)[:, None]
tl.store(out_ptr0 + (r2 + 64 * x3), tmp20, xmask)
tl.store(out_ptr1 + x3, tmp24, xmask)
tl.store(out_ptr2 + x3, tmp30, xmask)
@triton.jit
def triton_poi_fused__softmax_mean_mul_sub_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex // 256
x4 = xindex // 64
x3 = xindex % 256
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + 4 * x4, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x3, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (64 + x0 + 256 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr3 + (1 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (128 + x0 + 256 * x2), xmask, eviction_policy
='evict_last')
tmp21 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr3 + (2 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp30 = tl.load(in_ptr0 + (192 + x0 + 256 * x2), xmask, eviction_policy
='evict_last')
tmp31 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp36 = tl.load(in_ptr3 + (3 + 4 * x4), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp3 / tmp4
tmp8 = tmp6 - tmp7
tmp9 = tmp5 * tmp8
tmp12 = tmp10 - tmp11
tmp13 = tl_math.exp(tmp12)
tmp15 = tmp13 / tmp14
tmp17 = tmp16 - tmp7
tmp18 = tmp15 * tmp17
tmp19 = tmp9 + tmp18
tmp22 = tmp20 - tmp21
tmp23 = tl_math.exp(tmp22)
tmp25 = tmp23 / tmp24
tmp27 = tmp26 - tmp7
tmp28 = tmp25 * tmp27
tmp29 = tmp19 + tmp28
tmp32 = tmp30 - tmp31
tmp33 = tl_math.exp(tmp32)
tmp35 = tmp33 / tmp34
tmp37 = tmp36 - tmp7
tmp38 = tmp35 * tmp37
tmp39 = tmp29 + tmp38
tmp40 = 4.0
tmp41 = tmp39 / tmp40
tl.store(out_ptr0 + x5, tmp41, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 64), (64, 1))
assert_size_stride(primals_3, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 64), (256, 64, 1024, 1), torch.
float32)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_mul_pow_sub_sum_0[grid(16)](primals_3,
primals_1, primals_2, buf0, buf1, buf2, 16, 64, XBLOCK=1,
num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 64), (256, 64, 1), torch.float32)
triton_poi_fused__softmax_mean_mul_sub_1[grid(1024)](buf0, buf1,
buf2, primals_1, primals_2, buf3, 1024, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
return reinterpret_tensor(buf3, (4, 256, 1), (256, 1, 1), 0
), primals_1, primals_2, primals_3, buf1, buf2
class LDEPoolingNew(torch.nn.Module):
"""A novel learnable dictionary encoding layer according to [Weicheng Cai, etc., "A NOVEL LEARNABLE
DICTIONARY ENCODING LAYER FOR END-TO-END LANGUAGE IDENTIFICATION", icassp, 2018]"""
def __init__(self, input_dim, c_num=64):
super(LDEPoolingNew, self).__init__()
self.input_dim = input_dim
self.output_dim = input_dim * c_num
self.mu = torch.nn.Parameter(torch.randn(input_dim, c_num))
self.s = torch.nn.Parameter(torch.ones(c_num))
self.softmax_for_w = torch.nn.Softmax(dim=3)
def get_output_dim(self):
return self.output_dim
def forward(self, input_0):
primals_2 = self.mu
primals_3 = self.s
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
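# A minimal smoke-test sketch (added): assumes CUDA and the fixed (4, 4, 4)
# input shape that call() asserts; two fused Triton kernels compute the
# softmax statistics and the weighted-residual mean.
if __name__ == "__main__":
    if torch.cuda.is_available():
        pool = LDEPoolingNew(input_dim=4).cuda()
        print(pool(torch.rand(4, 4, 4, device='cuda')).shape)  # (4, 256, 1)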
| fancyliumeng/asv-subtools | LDEPooling | false | 6,681 | [
"Apache-2.0"
] | 1 | 56a13484472e7ae6eb00d762c00d57e581e78eb4 | https://github.com/fancyliumeng/asv-subtools/tree/56a13484472e7ae6eb00d762c00d57e581e78eb4 | import torch
import torch.nn
class Model(torch.nn.Module):
"""A novel learnable dictionary encoding layer according to [Weicheng Cai, etc., "A NOVEL LEARNABLE
DICTIONARY ENCODING LAYER FOR END-TO-END LANGUAGE IDENTIFICATION", icassp, 2018]"""
def __init__(self, input_dim, c_num=64):
super().__init__()
self.input_dim = input_dim
self.output_dim = input_dim * c_num
self.mu = torch.nn.Parameter(torch.randn(input_dim, c_num))
self.s = torch.nn.Parameter(torch.ones(c_num))
self.softmax_for_w = torch.nn.Softmax(dim=3)
def forward(self, inputs):
"""
        @inputs: a 3-dimensional tensor (one batch) with axes [sample-index, feature-dim-index, frame-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
r = inputs.transpose(1, 2).unsqueeze(3) - self.mu
w = self.softmax_for_w(self.s * torch.sum(r ** 2, dim=2, keepdim=True))
e = torch.mean(w * r, dim=1)
return e.reshape(-1, self.output_dim, 1)
def get_output_dim(self):
return self.output_dim
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
Conv1dLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/24/c247lstrpia4xuwjphutxw2zn36dsajpgqm4zjinubb3jymwwkd3.py
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# linear => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 3
y1 = (yindex // 3)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (3*x2) + (12*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ht/chtsnjuwkomolt5fx5l7wyg4kjhr7idbfplladpp2ezlbh6j4v67.py
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.add]
# Source node to ATen node mapping:
# linear => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_5), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/74/c74kvo3dqgusjb7cihtkodmg7zdl3agdwtohp7aozh6d6fuoyxrj.py
# Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv1d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 3), (12, 3, 1))
del buf0
buf2 = empty_strided_cuda((4, 3, 4), (12, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, primals_3, buf2, 12, 4, grid=grid(12, 4), stream=stream0)
buf3 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (12, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 3, 4), (12, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf4, primals_5, 48, grid=grid(48), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_3.run(buf1, primals_3, buf5, 48, grid=grid(48), stream=stream0)
del buf1
del primals_3
return (buf4, primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (12, 4), (4, 1), 0), primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
class Conv1dLinear(torch.nn.Module):
"""Conv1D + Linear for Transformer block.
    A variant of MultiLayeredConv1d, which replaces the second conv layer with a linear layer.
"""
def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
"""Initialize Conv1dLinear module.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
"""
super(Conv1dLinear, self).__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Linear(hidden_chans, in_chans)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Batch of input tensors (B, *, in_chans).
Returns:
            Tensor: Batch of output tensors (B, *, in_chans)
"""
x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
return self.w_2(self.dropout(x))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_chans': 4, 'hidden_chans': 4, 'kernel_size': 4,
'dropout_rate': 0.5}]
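# Usage sketch (added for illustration; the helper name below is ours, not part of
# the original record). It traces the shape flow of Conv1dLinear above: with
# kernel_size=4 and padding=(4 - 1)//2 = 1, a length-4 input gives a conv output of
# length 4 + 2*1 - 4 + 1 = 3, matching the (4, 4, 3) shape asserted for buf1 above.
def _conv1d_linear_shape_demo():
    ffn = Conv1dLinear(in_chans=4, hidden_chans=4, kernel_size=4, dropout_rate=0.5)
    x = torch.rand(4, 4, 4)  # (batch, time, in_chans)
    y = ffn(x)  # conv over time + ReLU, transpose back, dropout, linear
    assert y.shape == (4, 3, 4)  # time shrinks from 4 to 3, channels stay in_chans
    return y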
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 3
y1 = yindex // 3
y3 = yindex
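    # y0 indexes time (0..2), x2 the output channel (0..3), y1 the batch; the
    # kernel fuses the conv-bias add and ReLU with the transpose back to a
    # (batch, time, channel) layout so the following mm can consume it directly.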
tmp0 = tl.load(in_ptr0 + (y0 + 3 * x2 + 12 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
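    # Recomputes bias-add + ReLU on the conv output only to materialize the boolean
    # mask (activation <= 0) that autograd's threshold_backward needs; the forward
    # activation itself was already written out by triton_poi_fused_clone_1.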
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 3), (12, 3, 1))
del buf0
buf2 = empty_strided_cuda((4, 3, 4), (12, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(12, 4)](buf1, primals_3, buf2, 12, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((12, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (12, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 3, 4), (12, 4, 1), 0)
del buf3
triton_poi_fused_add_2[grid(48)](buf4, primals_5, 48, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_3[grid(48)](buf1,
primals_3, buf5, 48, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
del primals_3
return buf4, primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1,
4), 0), reinterpret_tensor(buf2, (12, 4), (4, 1), 0), primals_4, buf5
class Conv1dLinearNew(torch.nn.Module):
"""Conv1D + Linear for Transformer block.
    A variant of MultiLayeredConv1d, which replaces the second conv layer with a linear layer.
"""
def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
"""Initialize Conv1dLinear module.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
"""
super(Conv1dLinearNew, self).__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Linear(hidden_chans, in_chans)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, input_0):
primals_1 = self.w_1.weight
primals_3 = self.w_1.bias
primals_4 = self.w_2.weight
primals_5 = self.w_2.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| fancyliumeng/asv-subtools | Conv1dLinear | false | 6,682 | [
"Apache-2.0"
] | 1 | 56a13484472e7ae6eb00d762c00d57e581e78eb4 | https://github.com/fancyliumeng/asv-subtools/tree/56a13484472e7ae6eb00d762c00d57e581e78eb4 | import torch
import torch.nn
class Model(torch.nn.Module):
"""Conv1D + Linear for Transformer block.
A variant of MultiLayeredConv1d, which replaces second conv-layer to linear.
"""
def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):
"""Initialize Conv1dLinear module.
Args:
in_chans (int): Number of input channels.
hidden_chans (int): Number of hidden channels.
kernel_size (int): Kernel size of conv1d.
dropout_rate (float): Dropout rate.
"""
super().__init__()
self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size,
stride=1, padding=(kernel_size - 1) // 2)
self.w_2 = torch.nn.Linear(hidden_chans, in_chans)
self.dropout = torch.nn.Dropout(dropout_rate)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Batch of input tensors (B, *, in_chans).
Returns:
            Tensor: Batch of output tensors (B, *, in_chans)
"""
x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)
return self.w_2(self.dropout(x))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_chans': 4, 'hidden_chans': 4, 'kernel_size': 4,
'dropout_rate': 0.5}]
|
TdnnAffine | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cu/ccutvo2v4333pq6xhrg2zryqqwthm7dmmuqprvva2xdwiodpz5jn.py
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# outputs => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf1, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn
def to_device(device_object, tensor):
"""
    Select the device for a non-parameter tensor w.r.t. a model or a tensor whose device has already been set.
"""
if isinstance(device_object, torch.nn.Module):
next(device_object.parameters()).device
elif isinstance(device_object, torch.Tensor):
pass
return tensor
class TdnnAffine(torch.nn.Module):
""" An implemented tdnn affine component by conv1d
y = splice(w * x, context) + b
@input_dim: number of dims of frame <=> inputs channels of conv
@output_dim: number of layer nodes <=> outputs channels of conv
@context: a list of context
e.g. [-2,0,2]
    If context is [0], then TdnnAffine is equivalent to a linear layer.
"""
def __init__(self, input_dim, output_dim, context=[0], bias=True, pad=
True, norm_w=False, norm_f=False, subsampling_factor=1):
super(TdnnAffine, self).__init__()
for index in range(0, len(context) - 1):
if context[index] >= context[index + 1]:
raise ValueError(
                    'Context tuple {} is invalid: offsets must be strictly increasing.'.
format(context))
self.input_dim = input_dim
self.output_dim = output_dim
self.context = context
self.bool_bias = bias
self.pad = pad
self.norm_w = norm_w
self.norm_f = norm_f
self.stride = subsampling_factor
self.left_context = context[0] if context[0] < 0 else 0
self.right_context = context[-1] if context[-1] > 0 else 0
self.tot_context = self.right_context - self.left_context + 1
if self.tot_context > 1 and self.norm_f:
self.norm_f = False
None
        kernel_size = (self.tot_context,)
self.weight = torch.nn.Parameter(torch.randn(output_dim, input_dim,
*kernel_size))
if self.bool_bias:
self.bias = torch.nn.Parameter(torch.randn(output_dim))
else:
self.register_parameter('bias', None)
self.init_weight()
if len(context) != self.tot_context:
self.mask = torch.tensor([[[(1 if index in context else 0) for
index in range(self.left_context, self.right_context + 1)]]])
else:
self.mask = None
self.selected_device = False
def init_weight(self):
torch.nn.init.normal_(self.weight, 0.0, 0.01)
if self.bias is not None:
torch.nn.init.constant_(self.bias, 0.0)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
if self.pad:
inputs = F.pad(inputs, (-self.left_context, self.right_context),
mode='constant', value=0)
assert inputs.shape[2] >= self.tot_context
if not self.selected_device and self.mask is not None:
self.mask = to_device(self, self.mask)
self.selected_device = True
filters = (self.weight * self.mask if self.mask is not None else
self.weight)
if self.norm_w:
filters = F.normalize(filters, dim=1)
if self.norm_f:
inputs = F.normalize(inputs, dim=1)
outputs = F.conv1d(inputs, filters, self.bias, self.stride, padding
=0, dilation=1, groups=1)
return outputs
def extra_repr(self):
return (
'{input_dim}, {output_dim}, context={context}, bias={bool_bias}, stride={stride}, pad={pad}, norm_w={norm_w}, norm_f={norm_f}'
.format(**self.__dict__))
@classmethod
def thop_count(self, m, x, y):
x = x[0]
kernel_ops = torch.zeros(m.weight.size()[2:]).numel()
bias_ops = 1 if m.bias is not None else 0
total_ops = y.nelement() * (m.input_dim * kernel_ops + bias_ops)
m.total_ops += torch.DoubleTensor([int(total_ops)])
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
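# Illustration sketch (added; the helper name below is ours, not part of the
# original record): how the context mask is built for a non-contiguous context
# such as [-2, 0, 2]. Here left_context=-2 and right_context=2, so
# tot_context = 2 - (-2) + 1 = 5; the conv1d kernel spans the full 5-tap window
# and the mask zeroes the unused taps at offsets -1 and +1, while context=[0]
# would reduce the layer to a plain linear transform.
def _tdnn_context_mask_demo():
    tdnn = TdnnAffine(input_dim=4, output_dim=4, context=[-2, 0, 2])
    assert tdnn.tot_context == 5
    assert tdnn.mask.tolist() == [[[1, 0, 1, 0, 1]]]  # one flag per window offset
    return tdnn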
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
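    # buf1 is (4, 4, 4) with strides (16, 4, 1), so x1 = xindex // 4 % 4 selects
    # the output channel; the kernel adds the per-channel conv bias in place.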
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return buf1, primals_1, primals_2
def to_device(device_object, tensor):
"""
    Select the device for a non-parameter tensor w.r.t. a model or a tensor whose device has already been set.
"""
if isinstance(device_object, torch.nn.Module):
next(device_object.parameters()).device
elif isinstance(device_object, torch.Tensor):
pass
return tensor
class TdnnAffineNew(torch.nn.Module):
""" An implemented tdnn affine component by conv1d
y = splice(w * x, context) + b
@input_dim: number of dims of frame <=> inputs channels of conv
@output_dim: number of layer nodes <=> outputs channels of conv
@context: a list of context
e.g. [-2,0,2]
    If context is [0], then TdnnAffine is equivalent to a linear layer.
"""
def __init__(self, input_dim, output_dim, context=[0], bias=True, pad=
True, norm_w=False, norm_f=False, subsampling_factor=1):
super(TdnnAffineNew, self).__init__()
for index in range(0, len(context) - 1):
if context[index] >= context[index + 1]:
raise ValueError(
                    'Context tuple {} is invalid: offsets must be strictly increasing.'.
format(context))
self.input_dim = input_dim
self.output_dim = output_dim
self.context = context
self.bool_bias = bias
self.pad = pad
self.norm_w = norm_w
self.norm_f = norm_f
self.stride = subsampling_factor
self.left_context = context[0] if context[0] < 0 else 0
self.right_context = context[-1] if context[-1] > 0 else 0
self.tot_context = self.right_context - self.left_context + 1
if self.tot_context > 1 and self.norm_f:
self.norm_f = False
None
        kernel_size = (self.tot_context,)
self.weight = torch.nn.Parameter(torch.randn(output_dim, input_dim,
*kernel_size))
if self.bool_bias:
self.bias = torch.nn.Parameter(torch.randn(output_dim))
else:
self.register_parameter('bias', None)
self.init_weight()
if len(context) != self.tot_context:
self.mask = torch.tensor([[[(1 if index in context else 0) for
index in range(self.left_context, self.right_context + 1)]]])
else:
self.mask = None
self.selected_device = False
def init_weight(self):
torch.nn.init.normal_(self.weight, 0.0, 0.01)
if self.bias is not None:
torch.nn.init.constant_(self.bias, 0.0)
def extra_repr(self):
return (
'{input_dim}, {output_dim}, context={context}, bias={bool_bias}, stride={stride}, pad={pad}, norm_w={norm_w}, norm_f={norm_f}'
.format(**self.__dict__))
@classmethod
def thop_count(self, m, x, y):
x = x[0]
kernel_ops = torch.zeros(m.weight.size()[2:]).numel()
bias_ops = 1 if m.bias is not None else 0
total_ops = y.nelement() * (m.input_dim * kernel_ops + bias_ops)
m.total_ops += torch.DoubleTensor([int(total_ops)])
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| fancyliumeng/asv-subtools | TdnnAffine | false | 6,683 | [
"Apache-2.0"
] | 1 | 56a13484472e7ae6eb00d762c00d57e581e78eb4 | https://github.com/fancyliumeng/asv-subtools/tree/56a13484472e7ae6eb00d762c00d57e581e78eb4 | import torch
import torch.nn.functional as F
import torch.nn
def to_device(device_object, tensor):
"""
    Select the device for a non-parameter tensor w.r.t. a model or a tensor whose device has already been set.
"""
if isinstance(device_object, torch.nn.Module):
next(device_object.parameters()).device
elif isinstance(device_object, torch.Tensor):
pass
return tensor
class Model(torch.nn.Module):
""" An implemented tdnn affine component by conv1d
y = splice(w * x, context) + b
@input_dim: number of dims of frame <=> inputs channels of conv
@output_dim: number of layer nodes <=> outputs channels of conv
@context: a list of context
e.g. [-2,0,2]
    If context is [0], then TdnnAffine is equivalent to a linear layer.
"""
def __init__(self, input_dim, output_dim, context=[0], bias=True, pad=
True, norm_w=False, norm_f=False, subsampling_factor=1):
super().__init__()
for index in range(0, len(context) - 1):
if context[index] >= context[index + 1]:
raise ValueError(
                    'Context tuple {} is invalid: offsets must be strictly increasing.'.
format(context))
self.input_dim = input_dim
self.output_dim = output_dim
self.context = context
self.bool_bias = bias
self.pad = pad
self.norm_w = norm_w
self.norm_f = norm_f
self.stride = subsampling_factor
self.left_context = context[0] if context[0] < 0 else 0
self.right_context = context[-1] if context[-1] > 0 else 0
self.tot_context = self.right_context - self.left_context + 1
if self.tot_context > 1 and self.norm_f:
self.norm_f = False
None
        kernel_size = (self.tot_context,)
self.weight = torch.nn.Parameter(torch.randn(output_dim, input_dim,
*kernel_size))
if self.bool_bias:
self.bias = torch.nn.Parameter(torch.randn(output_dim))
else:
self.register_parameter('bias', None)
self.init_weight()
if len(context) != self.tot_context:
self.mask = torch.tensor([[[(1 if index in context else 0) for
index in range(self.left_context, self.right_context + 1)]]])
else:
self.mask = None
self.selected_device = False
def init_weight(self):
torch.nn.init.normal_(self.weight, 0.0, 0.01)
if self.bias is not None:
torch.nn.init.constant_(self.bias, 0.0)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
if self.pad:
inputs = F.pad(inputs, (-self.left_context, self.right_context),
mode='constant', value=0)
assert inputs.shape[2] >= self.tot_context
if not self.selected_device and self.mask is not None:
self.mask = to_device(self, self.mask)
self.selected_device = True
filters = (self.weight * self.mask if self.mask is not None else
self.weight)
if self.norm_w:
filters = F.normalize(filters, dim=1)
if self.norm_f:
inputs = F.normalize(inputs, dim=1)
outputs = F.conv1d(inputs, filters, self.bias, self.stride, padding
=0, dilation=1, groups=1)
return outputs
def extra_repr(self):
return (
'{input_dim}, {output_dim}, context={context}, bias={bool_bias}, stride={stride}, pad={pad}, norm_w={norm_w}, norm_f={norm_f}'
.format(**self.__dict__))
@classmethod
def thop_count(self, m, x, y):
x = x[0]
kernel_ops = torch.zeros(m.weight.size()[2:]).numel()
bias_ops = 1 if m.bias is not None else 0
total_ops = y.nelement() * (m.input_dim * kernel_ops + bias_ops)
        m.total_ops += torch.DoubleTensor([int(total_ops)])
# ... truncated (>4000 chars) for memory efficiency |
UpBlok | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xm/cxmk2kod6zgjturywionsuihaxqils4fvzrd7bziqpvptc3rgw43.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 4
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-1) + x1)) + (48*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4e/c4efs56ymyev6yow4ruutakn3po5nni7rvtifmzxqreckdzecoje.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/cr/ccri7cffjdxhdcglzfvzhu4wpte5acu3pxionui5nbusnbkkpvyl.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_5 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_7, %primals_8, [2, 2], [1, 1], [1, 1], True, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf4, primals_6, 256, grid=grid(256), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_7, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 8, 8), (256, 64, 8, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf6, primals_8, 1024, grid=grid(1024), stream=stream0)
del primals_8
return (buf6, primals_3, primals_5, primals_7, buf0, buf2, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class UpBlok(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv1x1 = nn.Conv2d(in_channels, in_channels, kernel_size=1,
stride=1, padding=0)
self.conv3x3 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=1, padding=1)
self.deconv = nn.ConvTranspose2d(out_channels, out_channels,
kernel_size=4, stride=2, padding=1)
def forward(self, upsampled, shortcut):
x = torch.cat([upsampled, shortcut], dim=1)
x = self.conv1x1(x)
x = F.relu(x)
x = self.conv3x3(x)
x = F.relu(x)
x = self.deconv(x)
return x
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
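# Shape sketch (added; the helper name below is ours, not part of the original
# record): UpBlok concatenates a 1-channel upsampled map with a 3-channel shortcut
# into in_channels=4, keeps the 4x4 spatial size through conv1x1 and conv3x3, then
# doubles it with ConvTranspose2d(k=4, s=2, p=1): H_out = (4 - 1)*2 - 2*1 + 4 = 8.
def _upblok_shape_demo():
    block = UpBlok(in_channels=4, out_channels=4)
    upsampled, shortcut = torch.rand(4, 1, 4, 4), torch.rand(4, 3, 4, 4)
    out = block(upsampled, shortcut)
    assert out.shape == (4, 4, 8, 8)  # matches buf5 in the compiled call above
    return out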
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = x1
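    # x1 is the output channel (0..3): channel 0 comes from in_ptr0 (the 1-channel
    # upsampled tensor), channels 1..3 from in_ptr1 (the 3-channel shortcut), which
    # is why the second load shifts the channel index by -1.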
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 48 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_relu_1[grid(256)](buf4, primals_6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
buf5 = extern_kernels.convolution(buf4, primals_7, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 8, 8), (256, 64, 8, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_2[grid(1024)](buf6, primals_8, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_8
return buf6, primals_3, primals_5, primals_7, buf0, buf2, buf4
class UpBlokNew(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv1x1 = nn.Conv2d(in_channels, in_channels, kernel_size=1,
stride=1, padding=0)
self.conv3x3 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=1, padding=1)
self.deconv = nn.ConvTranspose2d(out_channels, out_channels,
kernel_size=4, stride=2, padding=1)
def forward(self, input_0, input_1):
primals_3 = self.conv1x1.weight
primals_4 = self.conv1x1.bias
primals_5 = self.conv3x3.weight
primals_6 = self.conv3x3.bias
primals_7 = self.deconv.weight
primals_8 = self.deconv.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| fendaq/TextRSN | UpBlok | false | 6,684 | [
"MIT"
] | 1 | 02a6bc06cd64b581414ed5065a8c93e0c68a807a | https://github.com/fendaq/TextRSN/tree/02a6bc06cd64b581414ed5065a8c93e0c68a807a | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv1x1 = nn.Conv2d(in_channels, in_channels, kernel_size=1,
stride=1, padding=0)
self.conv3x3 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
stride=1, padding=1)
self.deconv = nn.ConvTranspose2d(out_channels, out_channels,
kernel_size=4, stride=2, padding=1)
def forward(self, upsampled, shortcut):
x = torch.cat([upsampled, shortcut], dim=1)
x = self.conv1x1(x)
x = F.relu(x)
x = self.conv3x3(x)
x = F.relu(x)
x = self.deconv(x)
return x
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Critic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (1, 256), (256, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 16384, grid=grid(16384), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 16384, grid=grid(16384), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_6, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf5)
del primals_7
return (reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf3, (64, 256), (256, 1), 0), primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Critic(nn.Module):
def __init__(self, state_dim):
super(Critic, self).__init__()
self.fc1 = nn.Linear(state_dim, 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, 1)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4}]
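def _example_critic_usage():
    # Hedged usage sketch; shapes follow get_inputs()/get_init_inputs() above,
    # and the names here are illustrative rather than from the source repo.
    critic = Critic(state_dim=4)
    value = critic(torch.rand(4, 4, 4, 4))  # Linear layers act on the last dim
    return value.shape  # torch.Size([4, 4, 4, 1])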
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # all-true mask; its result is unused in this extracted kernel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 256), (256, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (1, 256), (256, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf7, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
primals_5, buf6, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_6, (256, 1), (1, 256),
0), alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), reinterpret_tensor(buf3, (64, 256), (256, 1), 0
), primals_6, buf6, primals_4, buf7
class CriticNew(nn.Module):
def __init__(self, state_dim):
super(CriticNew, self).__init__()
self.fc1 = nn.Linear(state_dim, 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, 1)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| fengzhengyong-github/Deep-reinforcement-learning-with-pytorch | Critic | false | 6,685 | [
"MIT"
] | 1 | 3c56b601d14b0b0c8cde4b6bc6df5c1e8f298c7b | https://github.com/fengzhengyong-github/Deep-reinforcement-learning-with-pytorch/tree/3c56b601d14b0b0c8cde4b6bc6df5c1e8f298c7b | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, state_dim):
super().__init__()
self.fc1 = nn.Linear(state_dim, 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, 1)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
ScaledDotProductAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
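# Sketch (not generated code): the kernel above computes the numerator of a
# numerically stable softmax over dim=2 -- it subtracts the row max before
# exponentiating so exp() cannot overflow. In plain PyTorch:
def _reference_softmax_numerator(attn):
    return torch.exp(attn - attn.amax(dim=2, keepdim=True))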
# kernel path: runs/run_shard_4/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [context], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ScaledDotProductAttention(nn.Module):
def __init__(self, dropout: 'float'=0.0) ->None:
super(ScaledDotProductAttention, self).__init__()
self._dropout = nn.Dropout(dropout)
self._softmax = nn.Softmax(dim=2)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', scale: 'float'=None, attn_mask: 'torch.Tensor'=None
) ->torch.Tensor:
attn = torch.bmm(query, key.transpose(1, 2))
if scale is not None:
attn *= scale
if attn_mask is not None:
attn = attn.masked_fill(attn_mask, -1e+32)
attn = self._softmax(attn)
attn = self._dropout(attn)
context = torch.bmm(attn, value)
return context, attn
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
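def _example_attention_usage():
    # Hedged usage sketch; shapes follow get_inputs() above.
    sdpa = ScaledDotProductAttention(dropout=0.0)
    q, k, v = torch.rand(4, 4, 4), torch.rand(4, 4, 4), torch.rand(4, 4, 4)
    context, attn = sdpa(q, k, v)
    return context.shape, attn.shape  # both torch.Size([4, 4, 4])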
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
return buf3, buf2
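# Note on the buffer swaps above (an observation, not generated code): the
# wrapper reuses storage instead of allocating fresh tensors. buf0 (the raw
# attention scores) is overwritten in place by the normalized softmax (buf2),
# and buf1 (the exp numerator) is overwritten by the final bmm output (buf3).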
class ScaledDotProductAttentionNew(nn.Module):
def __init__(self, dropout: 'float'=0.0) ->None:
super(ScaledDotProductAttentionNew, self).__init__()
self._dropout = nn.Dropout(dropout)
self._softmax = nn.Softmax(dim=2)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
| fengtaoo/opmft | ScaledDotProductAttention | false | 6,686 | [
"MIT"
] | 1 | 64f2a12c724295cd913eda02502f2e2a20f2dd55 | https://github.com/fengtaoo/opmft/tree/64f2a12c724295cd913eda02502f2e2a20f2dd55 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, dropout: 'float'=0.0) ->None:
super().__init__()
self._dropout = nn.Dropout(dropout)
self._softmax = nn.Softmax(dim=2)
def forward(self, query: 'torch.Tensor', key: 'torch.Tensor', value:
'torch.Tensor', scale: 'float'=None, attn_mask: 'torch.Tensor'=None
) ->torch.Tensor:
attn = torch.bmm(query, key.transpose(1, 2))
if scale is not None:
attn *= scale
if attn_mask is not None:
attn = attn.masked_fill(attn_mask, -1e+32)
attn = self._softmax(attn)
attn = self._dropout(attn)
context = torch.bmm(attn, value)
return context, attn
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return []
|
L_TV | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6n/c6nimak5zbthq5qzhy4ixcujhlsgcwwimsxrlof7ud5njaqrhzf7.py
# Topologically Sorted Source Nodes: [sub, pow_1, h_tv, truediv, sub_1, pow_2, w_tv, truediv_1, add, mul, truediv_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div, aten.add, aten.mul]
# Source node to ATen node mapping:
# add => add
# h_tv => sum_1
# mul => mul
# pow_1 => pow_1
# pow_2 => pow_2
# sub => sub
# sub_1 => sub_1
# truediv => div
# truediv_1 => div_1
# truediv_2 => div_2
# w_tv => sum_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_7), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 12), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_12, %slice_16), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_2,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 12), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 4), kwargs = {})
triton_per_fused_add_div_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex % 12
r1 = (rindex // 12)
r2 = rindex % 3
r3 = (rindex // 3)
tmp0 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + (4*r3)), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + (4*r3)), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = 0.08333333333333333
tmp17 = tmp7 * tmp16
tmp18 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = 2.0
tmp21 = tmp19 * tmp20
tmp22 = 0.25
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
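# Where the fused constants above come from (a sketch for the default
# 4x4x4x4 input): count_h = (H - 1) * W = 3 * 4 = 12 and
# count_w = H * (W - 1) = 4 * 3 = 12, hence 1 / 12 = 0.08333...; the
# trailing 0.25 is 1 / batch_size, and 2.0 is TVLoss_weight * 2.
# Plain-PyTorch reference for the whole reduction:
def _reference_tv_loss(x, weight=1.0):
    b, _, h, w = x.shape
    h_tv = (x[:, :, 1:, :] - x[:, :, :-1, :]).pow(2).sum()
    w_tv = (x[:, :, :, 1:] - x[:, :, :, :-1]).pow(2).sum()
    return weight * 2 * (h_tv / ((h - 1) * w) + w_tv / (h * (w - 1))) / b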
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, h_tv, truediv, sub_1, pow_2, w_tv, truediv_1, add, mul, truediv_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div, aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_pow_sub_sum_0.run(buf2, arg0_1, 1, 192, grid=grid(1), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.optim
class L_TV(nn.Module):
def __init__(self, TVLoss_weight=1):
super(L_TV, self).__init__()
self.TVLoss_weight = TVLoss_weight
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = (x.size()[2] - 1) * x.size()[3]
count_w = x.size()[2] * (x.size()[3] - 1)
h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2).sum()
w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2).sum()
return self.TVLoss_weight * 2 * (h_tv / count_h + w_tv / count_w
) / batch_size
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]  # dead expression left by extraction; result unused
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # likewise unused (the all-true x mask)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
r2 = rindex % 3
r3 = rindex // 3
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.where(rmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = 0.08333333333333333
tmp17 = tmp7 * tmp16
tmp18 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = 2.0
tmp21 = tmp19 * tmp20
tmp22 = 0.25
tmp23 = tmp21 * tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_pow_sub_sum_0[grid(1)](buf2, arg0_1, 1,
192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class L_TVNew(nn.Module):
def __init__(self, TVLoss_weight=1):
super(L_TVNew, self).__init__()
self.TVLoss_weight = TVLoss_weight
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| farhantandia/Applied-CV-Zero-DCE-master | L_TV | false | 6,687 | [
"MIT"
] | 1 | 56a0f8aec799eb5d125f5d9f44f692b9a9a3c990 | https://github.com/farhantandia/Applied-CV-Zero-DCE-master/tree/56a0f8aec799eb5d125f5d9f44f692b9a9a3c990 | import torch
import torch.nn as nn
import torch.optim
class Model(nn.Module):
def __init__(self, TVLoss_weight=1):
super().__init__()
self.TVLoss_weight = TVLoss_weight
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = (x.size()[2] - 1) * x.size()[3]
count_w = x.size()[2] * (x.size()[3] - 1)
h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h_x - 1, :], 2).sum()
w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w_x - 1], 2).sum()
return self.TVLoss_weight * 2 * (h_tv / count_h + w_tv / count_w
) / batch_size
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
PixelNormLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3i/c3ikep7hd3hzwrmmgndgubxzmbyo4q2nelwv5iugzxxh7cpf2ljf.py
# Topologically Sorted Source Nodes: [pow_1, mean, add, sqrt, truediv], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# mean => mean
# pow_1 => pow_1
# sqrt => sqrt
# truediv => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 0.0001), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %sqrt), kwargs = {})
triton_poi_fused_add_div_mean_pow_sqrt_0 = async_compile.triton('triton_poi_fused_add_div_mean_pow_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_pow_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_pow_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 0.0001
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp0 / tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
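# Plain-PyTorch reference for the fused kernel above (a sketch): divide each
# pixel's feature vector by the RMS of its channel values -- mean over dim=1
# of x**2, plus the hard-coded 1e-4, then sqrt.
def _reference_pixel_norm(x):
    return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) + 0.0001)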
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, mean, add, sqrt, truediv], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_pow_sqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PixelNormLayer(nn.Module):
"""
Pixelwise feature vector normalization.
"""
def __init__(self, eps=1e-08):
super(PixelNormLayer, self).__init__()
self.eps = eps
def forward(self, x):
return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) + 0.0001)
def __repr__(self):
return self.__class__.__name__ + '(eps = %s)' % self.eps
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
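def _example_pixel_norm_usage():
    # Hedged usage sketch. Note that forward() hard-codes 1e-4 rather than
    # using self.eps (default 1e-08), so eps only shows up in __repr__.
    layer = PixelNormLayer()
    out = layer(torch.rand(4, 4, 4, 4))
    return out.shape  # torch.Size([4, 4, 4, 4])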
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_pow_sqrt_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 0.0001
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp0 / tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_pow_sqrt_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class PixelNormLayerNew(nn.Module):
"""
Pixelwise feature vector normalization.
"""
def __init__(self, eps=1e-08):
super(PixelNormLayerNew, self).__init__()
self.eps = eps
def __repr__(self):
return self.__class__.__name__ + '(eps = %s)' % self.eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ferngonzalezp/turbulence-GAN | PixelNormLayer | false | 6,688 | [
"MIT"
] | 1 | a215a3c5af2dc9a723f95c344e295ecc08954f26 | https://github.com/ferngonzalezp/turbulence-GAN/tree/a215a3c5af2dc9a723f95c344e295ecc08954f26 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Pixelwise feature vector normalization.
"""
def __init__(self, eps=1e-08):
super().__init__()
self.eps = eps
def forward(self, x):
return x / torch.sqrt(torch.mean(x ** 2, dim=1, keepdim=True) + 0.0001)
def __repr__(self):
return self.__class__.__name__ + '(eps = %s)' % self.eps
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3u/c3uq2hoonbbvqjds2qsaq4ooqhekddxwhva2j4ypqroy7umacfqg.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 111392
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3481) % 8
x0 = xindex % 3481
x4 = (xindex // 3481)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (3488*x4)), tmp4, xmask)
''', device_str='cuda')
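# Sketch (not generated code): per the graph fragment above, the convolution
# itself runs as a separate aten.convolution call; this kernel only fuses the
# per-channel bias add with the ReLU epilogue. Plain-PyTorch equivalent:
def _reference_bias_relu(conv_out, bias):
    return torch.relu(conv_out + bias.view(1, -1, 1, 1))  # NCHW broadcast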
# kernel path: runs/run_shard_4/inductor_cache/gn/cgn74ejw4bzj5ez2aztubyhhaclhochfrqpevzbmciail4gnv3cl.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 26912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 29
x1 = (xindex // 29) % 29
x4 = (xindex // 841)
x3 = (xindex // 6728)
x5 = xindex % 6728
tmp0 = tl.load(in_ptr0 + ((2*x0) + (118*x1) + (3488*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (118*x1) + (3488*x4)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (59 + (2*x0) + (118*x1) + (3488*x4)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (60 + (2*x0) + (118*x1) + (3488*x4)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + (6752*x3)), tmp6, xmask)
tl.store(out_ptr1 + (x5 + (6784*x3)), tmp16, xmask)
''', device_str='cuda')
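# Sketch (not generated code): the kernel above fuses a 2x2, stride-2 max
# pool with the index bookkeeping needed for the backward pass. The int8
# codes 0..3 record which window position won (0 = top-left, 1 = top-right,
# 2 = bottom-left, 3 = bottom-right); PyTorch's eager op returns flat
# indices instead:
def _reference_maxpool2x2(x):
    return torch.nn.functional.max_pool2d(x, 2, 2, return_indices=True)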
# kernel path: runs/run_shard_4/inductor_cache/uu/cuuo36ncjspstgmpb2226izcifk422ht2ottnvqa3qjmtawosjhg.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 36864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 576) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ev/cevbnauysno3ccxrpgpdwhepssjpgjagrsn4aag5slw6x5wliw2d.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (48*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (48*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (24 + (2*x0) + (48*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (25 + (2*x0) + (48*x1)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/u3/cu3u6nisj4uasi7xrobwpjqd3s4to52j77z4bqgi5byl3fw2cmya.py
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# relu_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 49) % 24
x2 = (xindex // 1176)
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + (1184*x2)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/h7/ch7p7gmblj3hu3k6sxp5ifdvrqk2kt6oiaz6mnrptpbnfgatyjsf.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => _low_memory_max_pool2d_with_offsets_2, getitem_5
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_2, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = (xindex // 3) % 3
x2 = (xindex // 9) % 24
x3 = (xindex // 216)
x4 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (14*x1) + (49*x2) + (1184*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (14*x1) + (49*x2) + (1184*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (7 + (2*x0) + (14*x1) + (49*x2) + (1184*x3)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (8 + (2*x0) + (14*x1) + (49*x2) + (1184*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x4), tmp15, xmask)
tl.store(out_ptr1 + (x4), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4d/c4du65qclyui2slr55w75uy2vgvlytpo6a7ayna5e7srbmclnrnv.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nc/cncufi5fwljjqzodj5xs4ful6p33khupoyd6zror7uetj5egismy.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_5 => relu_4
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_7 = async_compile.triton('triton_poi_fused_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (8, 3, 6, 6), (108, 36, 6, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 6, 6), (288, 36, 6, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (24, 16, 6, 6), (576, 36, 6, 1))
assert_size_stride(primals_7, (24, ), (1, ))
assert_size_stride(primals_8, (100, 216), (216, 1))
assert_size_stride(primals_9, (100, ), (1, ))
assert_size_stride(primals_10, (32, 100), (100, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (3, 32), (32, 1))
assert_size_stride(primals_13, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 59, 59), (27848, 3481, 59, 1))
buf1 = empty_strided_cuda((4, 8, 59, 59), (27904, 3488, 59, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 111392, grid=grid(111392), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 8, 29, 29), (6752, 841, 29, 1), torch.float32)
buf3 = empty_strided_cuda((4, 8, 29, 29), (6784, 841, 29, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 26912, grid=grid(26912), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 24, 24), (9216, 576, 24, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 36864, grid=grid(36864), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 12, 12), (2304, 144, 12, 1), torch.float32)
buf7 = empty_strided_cuda((4, 16, 12, 12), (2304, 144, 12, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 9216, grid=grid(9216), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 24, 7, 7), (1176, 49, 7, 1))
buf9 = empty_strided_cuda((4, 24, 7, 7), (1184, 49, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf8, primals_7, buf9, 4704, grid=grid(4704), stream=stream0)
del buf8
del primals_7
buf10 = empty_strided_cuda((4, 24, 3, 3), (216, 9, 3, 1), torch.int8)
buf11 = empty_strided_cuda((4, 24, 3, 3), (216, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 864, grid=grid(864), stream=stream0)
buf12 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf11, (4, 216), (216, 1), 0), reinterpret_tensor(primals_8, (216, 100), (1, 216), 0), out=buf12)
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf13, primals_9, 400, grid=grid(400), stream=stream0)
del primals_9
buf14 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf13, reinterpret_tensor(primals_10, (100, 32), (1, 100), 0), out=buf14)
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu]
triton_poi_fused_relu_7.run(buf15, primals_11, 128, grid=grid(128), stream=stream0)
del primals_11
buf16 = empty_strided_cuda((4, 3), (3, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, buf15, reinterpret_tensor(primals_12, (32, 3), (1, 32), 0), alpha=1, beta=1, out=buf16)
del primals_13
return (buf16, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (4, 216), (216, 1), 0), buf13, buf15, primals_12, primals_10, primals_8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 3, 6, 6), (108, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 8, 6, 6), (288, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((24, 16, 6, 6), (576, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((100, 216), (216, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((3, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import functional as F
from torch import nn
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(3, 8, 6, 1)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(8, 16, 6, 1)
self.conv3 = nn.Conv2d(16, 24, 6, 1)
self.fc1 = nn.Linear(24 * 3 * 3, 100)
self.fc2 = nn.Linear(100, 32)
self.fc3 = nn.Linear(32, 3)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = x.view(-1, 24 * 3 * 3)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
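# Fused bias-add + ReLU for conv1: adds the per-channel bias (in_ptr1) to the
# (4, 8, 59, 59) conv output, clamps at zero, and writes each 59x59 plane into
# a buffer padded to a per-plane stride of 3488 elements (59*59 = 3481).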
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 111392
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3481 % 8
x0 = xindex % 3481
x4 = xindex // 3481
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3488 * x4), tmp4, xmask)
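# 2x2 stride-2 max pooling over the padded 59x59 planes: out_ptr0 receives the
# 29x29 pooled values and out_ptr1 the int8 offset (0..3) of the winning
# element inside each window, as max_pool2d_with_indices requires.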
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 26912
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 29
x1 = xindex // 29 % 29
x4 = xindex // 841
x3 = xindex // 6728
x5 = xindex % 6728
tmp0 = tl.load(in_ptr0 + (2 * x0 + 118 * x1 + 3488 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 118 * x1 + 3488 * x4), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (59 + 2 * x0 + 118 * x1 + 3488 * x4), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (60 + 2 * x0 + 118 * x1 + 3488 * x4), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x5 + 6752 * x3), tmp6, xmask)
tl.store(out_ptr1 + (x5 + 6784 * x3), tmp16, xmask)
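# In-place bias-add + ReLU for conv2 on its (4, 16, 24, 24) output; the 36864
# elements fill the launch grid exactly, so no bounds mask is emitted (the
# unused tl.full(..., True, tl.int1) below appears to be a leftover of that).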
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 576 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
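# 2x2 stride-2 max pooling 24x24 -> 12x12 of conv2's activations, storing the
# pooled values and int8 argmax offsets into contiguous (4, 16, 12, 12)
# buffers.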
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x1), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
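# Fused bias-add + ReLU for conv3: the (4, 24, 7, 7) result is stored with a
# per-sample stride of 1184 (24*49 = 1176, rounded up), and the next pooling
# kernel reads it back with that padded stride.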
@triton.jit
def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 49 % 24
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp4, xmask)
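# 2x2 stride-2 max pooling 7x7 -> 3x3 ahead of the flatten; note the output
# order is swapped relative to the earlier pooling kernels: out_ptr0 receives
# the int8 argmax offsets and out_ptr1 the pooled values.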
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3 % 3
x2 = xindex // 9 % 24
x3 = xindex // 216
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 14 * x1 + 49 * x2 + 1184 * x3),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 14 * x1 + 49 * x2 + 1184 * x3),
xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (7 + 2 * x0 + 14 * x1 + 49 * x2 + 1184 * x3),
xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (8 + 2 * x0 + 14 * x1 + 49 * x2 + 1184 * x3),
xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x4, tmp15, xmask)
tl.store(out_ptr1 + x4, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
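# Same fusion for fc2: in-place bias-add + ReLU on its (4 x 32) matmul output.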
@triton.jit
def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (8, 3, 6, 6), (108, 36, 6, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 6, 6), (288, 36, 6, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (24, 16, 6, 6), (576, 36, 6, 1))
assert_size_stride(primals_7, (24,), (1,))
assert_size_stride(primals_8, (100, 216), (216, 1))
assert_size_stride(primals_9, (100,), (1,))
assert_size_stride(primals_10, (32, 100), (100, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (3, 32), (32, 1))
assert_size_stride(primals_13, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 59, 59), (27848, 3481, 59, 1))
buf1 = empty_strided_cuda((4, 8, 59, 59), (27904, 3488, 59, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(111392)](buf0, primals_2,
buf1, 111392, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 8, 29, 29), (6752, 841, 29, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 8, 29, 29), (6784, 841, 29, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(26912)](buf1, buf2,
buf3, 26912, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 24, 24), (9216, 576, 24, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(36864)](buf5, primals_5,
36864, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 12, 12), (2304, 144, 12, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 16, 12, 12), (2304, 144, 12, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(9216)](buf5, buf6,
buf7, 9216, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 24, 7, 7), (1176, 49, 7, 1))
buf9 = empty_strided_cuda((4, 24, 7, 7), (1184, 49, 7, 1), torch.
float32)
triton_poi_fused_convolution_relu_4[grid(4704)](buf8, primals_7,
buf9, 4704, XBLOCK=128, num_warps=4, num_stages=1)
del buf8
del primals_7
buf10 = empty_strided_cuda((4, 24, 3, 3), (216, 9, 3, 1), torch.int8)
buf11 = empty_strided_cuda((4, 24, 3, 3), (216, 9, 3, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_5[grid(864)](buf9, buf10,
buf11, 864, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = empty_strided_cuda((4, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf11, (4, 216), (216, 1), 0),
reinterpret_tensor(primals_8, (216, 100), (1, 216), 0), out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_relu_6[grid(400)](buf13, primals_9, 400, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((4, 32), (32, 1), torch.float32)
extern_kernels.mm(buf13, reinterpret_tensor(primals_10, (100, 32),
(1, 100), 0), out=buf14)
buf15 = buf14
del buf14
triton_poi_fused_relu_7[grid(128)](buf15, primals_11, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_11
buf16 = empty_strided_cuda((4, 3), (3, 1), torch.float32)
extern_kernels.addmm(primals_13, buf15, reinterpret_tensor(
primals_12, (32, 3), (1, 32), 0), alpha=1, beta=1, out=buf16)
del primals_13
return (buf16, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (4,
216), (216, 1), 0), buf13, buf15, primals_12, primals_10, primals_8)
class CNNNew(nn.Module):
def __init__(self):
super(CNNNew, self).__init__()
self.conv1 = nn.Conv2d(3, 8, 6, 1)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(8, 16, 6, 1)
self.conv3 = nn.Conv2d(16, 24, 6, 1)
self.fc1 = nn.Linear(24 * 3 * 3, 100)
self.fc2 = nn.Linear(100, 32)
self.fc3 = nn.Linear(32, 3)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_12 = self.fc3.weight
primals_13 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| eosay/robotCNN | CNN | false | 6,689 | [
"MIT"
] | 1 | 9eaefcc223e868c01f6d1a49a28d2a9f392857e5 | https://github.com/eosay/robotCNN/tree/9eaefcc223e868c01f6d1a49a28d2a9f392857e5 | import torch
from torch.nn import functional as F
from torch import nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 8, 6, 1)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(8, 16, 6, 1)
self.conv3 = nn.Conv2d(16, 24, 6, 1)
self.fc1 = nn.Linear(24 * 3 * 3, 100)
self.fc2 = nn.Linear(100, 32)
self.fc3 = nn.Linear(32, 3)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = x.view(-1, 24 * 3 * 3)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
SequenceBias | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/vg/cvgvkbs7he2kxlg5pfohohojmk4myarfheu6y73rbt6z3xdls2y7.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %repeat],), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x3 + (16*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x4), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 80, grid=grid(80), stream=stream0)
del primals_1
del primals_2
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from torch.nn.parameter import Parameter
class SequenceBias(nn.Module):
"""
    Adds one bias element to the end of the sequence,
    so if the input has a shape ``(L, N, E)``, where
``L`` is the sequence length, ``N`` is the batch size, and ``E`` is
the embedding dimension, the output will have a shape
``(L+1, N, E)``.
Attributes:
bias (:class:`torch.nn.parameter.Parameter`): the learnable bias of
the module of shape ``(E)``, where ``E`` is the embedding dimension.
Example:
>>> m = SequenceBias(16)
>>> input = torch.randn(20, 4, 16)
>>> output = m(input)
>>> print(output.size())
torch.Size([21, 4, 16])
"""
def __init__(self, embed_dim: 'int'):
"""
Args:
embed_dim: Embedding dimension
"""
super(SequenceBias, self).__init__()
self.bias = Parameter(torch.empty(embed_dim))
self._reset_parameters()
def _reset_parameters(self):
"""
        Assigns normally distributed random values to the bias.
"""
nn.init.normal_(self.bias)
def forward(self, x):
_, bsz, _ = x.shape
return torch.cat([x, self.bias.repeat(1, bsz, 1)])
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
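# Concatenation kernel for SequenceBias: the first four sequence slots (64
# elements) copy the (4, 4, 4) input, and the appended fifth slot broadcasts
# the 4-element bias across the batch, producing the (5, 4, 4) result of
# torch.cat([x, bias.repeat(1, bsz, 1)]).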
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = tl.load(in_ptr1 + x0, tmp6 & xmask, eviction_policy='evict_last',
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((5, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](primals_1, primals_2, buf0, 80,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class SequenceBiasNew(nn.Module):
"""
    Adds one bias element to the end of the sequence,
    so if the input has a shape ``(L, N, E)``, where
``L`` is the sequence length, ``N`` is the batch size, and ``E`` is
the embedding dimension, the output will have a shape
``(L+1, N, E)``.
Attributes:
bias (:class:`torch.nn.parameter.Parameter`): the learnable bias of
the module of shape ``(E)``, where ``E`` is the embedding dimension.
Example:
>>> m = SequenceBias(16)
>>> input = torch.randn(20, 4, 16)
>>> output = m(input)
>>> print(output.size())
torch.Size([21, 4, 16])
"""
def __init__(self, embed_dim: 'int'):
"""
Args:
embed_dim: Embedding dimension
"""
super(SequenceBiasNew, self).__init__()
self.bias = Parameter(torch.empty(embed_dim))
self._reset_parameters()
def _reset_parameters(self):
"""
        Assigns normally distributed random values to the bias.
"""
nn.init.normal_(self.bias)
def forward(self, input_0):
primals_2 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| ffuuugor/opacus | SequenceBias | false | 6,690 | [
"Apache-2.0"
] | 1 | 2048a6e92902685c2a735e9fb7c0d48b4846b494 | https://github.com/ffuuugor/opacus/tree/2048a6e92902685c2a735e9fb7c0d48b4846b494 | import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from torch.nn.parameter import Parameter
class Model(nn.Module):
"""
    Adds one bias element to the end of the sequence,
    so if the input has a shape ``(L, N, E)``, where
``L`` is the sequence length, ``N`` is the batch size, and ``E`` is
the embedding dimension, the output will have a shape
``(L+1, N, E)``.
Attributes:
bias (:class:`torch.nn.parameter.Parameter`): the learnable bias of
the module of shape ``(E)``, where ``E`` is the embedding dimension.
Example:
>>> m = SequenceBias(16)
>>> input = torch.randn(20, 4, 16)
>>> output = m(input)
>>> print(output.size())
torch.Size([21, 4, 16])
"""
def __init__(self, embed_dim: 'int'):
"""
Args:
embed_dim: Embedding dimension
"""
super().__init__()
self.bias = Parameter(torch.empty(embed_dim))
self._reset_parameters()
def _reset_parameters(self):
"""
        Assigns normally distributed random values to the bias.
"""
nn.init.normal_(self.bias)
def forward(self, x):
_, bsz, _ = x.shape
return torch.cat([x, self.bias.repeat(1, bsz, 1)])
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
SoftmaxAffineLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/fl/cflstwmlz2yvpiz53fzxoydfaocdtqjlttm2ikj7dzqmkakdkkho.py
# Topologically Sorted Source Nodes: [outputs, log_softmax], Original ATen: [aten.convolution, aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, sub, sum_1
# outputs => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%convolution, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_poi_fused__log_softmax_convolution_0 = async_compile.triton('triton_poi_fused__log_softmax_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_convolution_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr1 + (1))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp10 = tl.load(in_ptr1 + (2))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp15 = tl.load(in_ptr1 + (3))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp12 = tmp9 + tmp11
tmp13 = triton_helpers.maximum(tmp8, tmp12)
tmp17 = tmp14 + tmp16
tmp18 = triton_helpers.maximum(tmp13, tmp17)
tmp19 = tmp3 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp7 - tmp18
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp12 - tmp18
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp18
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tl.store(out_ptr0 + (x2), tmp18, xmask)
tl.store(out_ptr1 + (x2), tmp29, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/tu/ctudv5w6bvxf5xoqixsfrddt2pyqqafh7xcdkfq5p2cwx5wzgjto.py
# Topologically Sorted Source Nodes: [outputs, log_softmax], Original ATen: [aten.convolution, aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, log, sub, sub_1
# outputs => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%convolution, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %amax), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_convolution_1 = async_compile.triton('triton_poi_fused__log_softmax_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp4 - tmp6
tl.store(in_out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf2 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [outputs, log_softmax], Original ATen: [aten.convolution, aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_convolution_0.run(buf0, primals_3, buf1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [outputs, log_softmax], Original ATen: [aten.convolution, aten._log_softmax]
triton_poi_fused__log_softmax_convolution_1.run(buf3, primals_3, buf1, buf2, 64, grid=grid(64), stream=stream0)
del buf1
del buf2
del primals_3
return (buf3, primals_1, primals_2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn
def to_device(device_object, tensor):
"""
    Select a device for a non-parameter tensor w.r.t. a model or a tensor that has already been assigned a device.
"""
if isinstance(device_object, torch.nn.Module):
next(device_object.parameters()).device
elif isinstance(device_object, torch.Tensor):
pass
return tensor
class TdnnAffine(torch.nn.Module):
""" An implemented tdnn affine component by conv1d
y = splice(w * x, context) + b
@input_dim: number of dims of frame <=> inputs channels of conv
@output_dim: number of layer nodes <=> outputs channels of conv
@context: a list of context
e.g. [-2,0,2]
If context is [0], then the TdnnAffine is equal to linear layer.
"""
def __init__(self, input_dim, output_dim, context=[0], bias=True, pad=
True, norm_w=False, norm_f=False, subsampling_factor=1):
super(TdnnAffine, self).__init__()
for index in range(0, len(context) - 1):
if context[index] >= context[index + 1]:
raise ValueError(
                    'Context tuple {} is invalid: offsets must be strictly increasing.'.
format(context))
self.input_dim = input_dim
self.output_dim = output_dim
self.context = context
self.bool_bias = bias
self.pad = pad
self.norm_w = norm_w
self.norm_f = norm_f
self.stride = subsampling_factor
self.left_context = context[0] if context[0] < 0 else 0
self.right_context = context[-1] if context[-1] > 0 else 0
self.tot_context = self.right_context - self.left_context + 1
if self.tot_context > 1 and self.norm_f:
self.norm_f = False
            None  # no-op (possibly a stripped warning); norm_f is disabled for multi-frame contexts
kernel_size = self.tot_context,
self.weight = torch.nn.Parameter(torch.randn(output_dim, input_dim,
*kernel_size))
if self.bool_bias:
self.bias = torch.nn.Parameter(torch.randn(output_dim))
else:
self.register_parameter('bias', None)
self.init_weight()
if len(context) != self.tot_context:
self.mask = torch.tensor([[[(1 if index in context else 0) for
index in range(self.left_context, self.right_context + 1)]]])
else:
self.mask = None
self.selected_device = False
def init_weight(self):
torch.nn.init.normal_(self.weight, 0.0, 0.01)
if self.bias is not None:
torch.nn.init.constant_(self.bias, 0.0)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
if self.pad:
inputs = F.pad(inputs, (-self.left_context, self.right_context),
mode='constant', value=0)
assert inputs.shape[2] >= self.tot_context
if not self.selected_device and self.mask is not None:
self.mask = to_device(self, self.mask)
self.selected_device = True
filters = (self.weight * self.mask if self.mask is not None else
self.weight)
if self.norm_w:
filters = F.normalize(filters, dim=1)
if self.norm_f:
inputs = F.normalize(inputs, dim=1)
outputs = F.conv1d(inputs, filters, self.bias, self.stride, padding
=0, dilation=1, groups=1)
return outputs
def extra_repr(self):
return (
'{input_dim}, {output_dim}, context={context}, bias={bool_bias}, stride={stride}, pad={pad}, norm_w={norm_w}, norm_f={norm_f}'
.format(**self.__dict__))
@classmethod
def thop_count(self, m, x, y):
x = x[0]
kernel_ops = torch.zeros(m.weight.size()[2:]).numel()
bias_ops = 1 if m.bias is not None else 0
total_ops = y.nelement() * (m.input_dim * kernel_ops + bias_ops)
m.total_ops += torch.DoubleTensor([int(total_ops)])
class SoftmaxAffineLayer(torch.nn.Module):
""" An usual 2-fold softmax layer with an affine transform.
@dim: which dim to apply softmax on
"""
def __init__(self, input_dim, output_dim, dim=1, log=True, bias=True,
special_init=False):
super(SoftmaxAffineLayer, self).__init__()
self.affine = TdnnAffine(input_dim, output_dim, bias=bias)
if log:
self.softmax = torch.nn.LogSoftmax(dim=dim)
else:
self.softmax = torch.nn.Softmax(dim=dim)
if special_init:
torch.nn.init.xavier_uniform_(self.affine.weight, gain=torch.nn
.init.calculate_gain('sigmoid'))
def forward(self, inputs):
"""
@inputs: any, such as a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
return self.softmax(self.affine(inputs))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
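# First log-softmax pass: re-adds the conv bias, then computes, per spatial
# position, the channel-wise max (out_ptr0) and the sum of exp(x - max)
# (out_ptr1) across the four output channels.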
@triton.jit
def triton_poi_fused__log_softmax_convolution_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp9 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp10 = tl.load(in_ptr1 + 2)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK])
tmp14 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp15 = tl.load(in_ptr1 + 3)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp8 = triton_helpers.maximum(tmp3, tmp7)
tmp12 = tmp9 + tmp11
tmp13 = triton_helpers.maximum(tmp8, tmp12)
tmp17 = tmp14 + tmp16
tmp18 = triton_helpers.maximum(tmp13, tmp17)
tmp19 = tmp3 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp7 - tmp18
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp12 - tmp18
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp18
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tl.store(out_ptr0 + x2, tmp18, xmask)
tl.store(out_ptr1 + x2, tmp29, xmask)
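# Second log-softmax pass, in place on the conv output: adds the bias again
# and finishes (x + b - max) - log(sum_exp) using the statistics produced by
# the first pass.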
@triton.jit
def triton_poi_fused__log_softmax_convolution_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp4 - tmp6
tl.store(in_out_ptr0 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
buf2 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_convolution_0[grid(16)](buf0,
primals_3, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = buf0
del buf0
triton_poi_fused__log_softmax_convolution_1[grid(64)](buf3,
primals_3, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
del buf2
del primals_3
return buf3, primals_1, primals_2, buf3
def to_device(device_object, tensor):
"""
    Select a device for a non-parameter tensor w.r.t. a model or a tensor that has already been assigned a device.
"""
if isinstance(device_object, torch.nn.Module):
next(device_object.parameters()).device
elif isinstance(device_object, torch.Tensor):
pass
return tensor
class TdnnAffine(torch.nn.Module):
""" An implemented tdnn affine component by conv1d
y = splice(w * x, context) + b
@input_dim: number of dims of frame <=> inputs channels of conv
@output_dim: number of layer nodes <=> outputs channels of conv
@context: a list of context
e.g. [-2,0,2]
If context is [0], then the TdnnAffine is equal to linear layer.
"""
def __init__(self, input_dim, output_dim, context=[0], bias=True, pad=
True, norm_w=False, norm_f=False, subsampling_factor=1):
super(TdnnAffine, self).__init__()
for index in range(0, len(context) - 1):
if context[index] >= context[index + 1]:
                raise ValueError(
                    'Context tuple {} is invalid: entries must be strictly increasing.'
                    .format(context))
self.input_dim = input_dim
self.output_dim = output_dim
self.context = context
self.bool_bias = bias
self.pad = pad
self.norm_w = norm_w
self.norm_f = norm_f
self.stride = subsampling_factor
self.left_context = context[0] if context[0] < 0 else 0
self.right_context = context[-1] if context[-1] > 0 else 0
self.tot_context = self.right_context - self.left_context + 1
if self.tot_context > 1 and self.norm_f:
self.norm_f = False
            # norm_f only makes sense for a single-frame context, so disable it.
kernel_size = self.tot_context,
self.weight = torch.nn.Parameter(torch.randn(output_dim, input_dim,
*kernel_size))
if self.bool_bias:
self.bias = torch.nn.Parameter(torch.randn(output_dim))
else:
self.register_parameter('bias', None)
self.init_weight()
if len(context) != self.tot_context:
self.mask = torch.tensor([[[(1 if index in context else 0) for
index in range(self.left_context, self.right_context + 1)]]])
else:
self.mask = None
self.selected_device = False
def init_weight(self):
torch.nn.init.normal_(self.weight, 0.0, 0.01)
if self.bias is not None:
torch.nn.init.constant_(self.bias, 0.0)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
if self.pad:
inputs = F.pad(inputs, (-self.left_context, self.right_context),
mode='constant', value=0)
assert inputs.shape[2] >= self.tot_context
if not self.selected_device and self.mask is not None:
self.mask = to_device(self, self.mask)
self.selected_device = True
filters = (self.weight * self.mask if self.mask is not None else
self.weight)
if self.norm_w:
filters = F.normalize(filters, dim=1)
if self.norm_f:
inputs = F.normalize(inputs, dim=1)
outputs = F.conv1d(inputs, filters, self.bias, self.stride, padding
=0, dilation=1, groups=1)
return outputs
def extra_repr(self):
return (
'{input_dim}, {output_dim}, context={context}, bias={bool_bias}, stride={stride}, pad={pad}, norm_w={norm_w}, norm_f={norm_f}'
.format(**self.__dict__))
@classmethod
    def thop_count(cls, m, x, y):
x = x[0]
kernel_ops = torch.zeros(m.weight.size()[2:]).numel()
bias_ops = 1 if m.bias is not None else 0
total_ops = y.nelement() * (m.input_dim * kernel_ops + bias_ops)
m.total_ops += torch.DoubleTensor([int(total_ops)])
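# Hedged sketch, not part of the generated graph: with the default context=[0]
# the TdnnAffine above reduces to a frame-wise linear map, so conv1d and an
# explicit matmul with the same parameters should agree. The helper name and
# the shapes below are illustrative assumptions; call it manually to verify.
def _check_tdnn_affine_is_linear():
    layer = TdnnAffine(input_dim=4, output_dim=4, context=[0])
    x = torch.randn(2, 4, 10)  # (batch, frame-dim, frames)
    y_conv = layer(x)
    w = layer.weight.squeeze(-1)  # kernel size 1 -> (output_dim, input_dim)
    y_lin = torch.einsum('oi,bif->bof', w, x) + layer.bias.view(1, -1, 1)
    assert torch.allclose(y_conv, y_lin, atol=1e-5)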
class SoftmaxAffineLayerNew(torch.nn.Module):
""" An usual 2-fold softmax layer with an affine transform.
@dim: which dim to apply softmax on
"""
def __init__(self, input_dim, output_dim, dim=1, log=True, bias=True,
special_init=False):
super(SoftmaxAffineLayerNew, self).__init__()
self.affine = TdnnAffine(input_dim, output_dim, bias=bias)
if log:
self.softmax = torch.nn.LogSoftmax(dim=dim)
else:
self.softmax = torch.nn.Softmax(dim=dim)
if special_init:
torch.nn.init.xavier_uniform_(self.affine.weight, gain=torch.nn
.init.calculate_gain('sigmoid'))
def forward(self, input_0):
primals_2 = self.affine.weight
primals_3 = self.affine.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
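# Note: call() returns (log_probs, saved_input, saved_weight, log_probs); only
# the first element is exposed, matching LogSoftmax(dim=1) over TdnnAffine(x).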
| fancyliumeng/asv-subtools | SoftmaxAffineLayer | false | 6,691 | [
"Apache-2.0"
] | 1 | 56a13484472e7ae6eb00d762c00d57e581e78eb4 | https://github.com/fancyliumeng/asv-subtools/tree/56a13484472e7ae6eb00d762c00d57e581e78eb4 | import torch
import torch.nn.functional as F
import torch.nn
def to_device(device_object, tensor):
"""
    Select a device for a non-parameter tensor w.r.t. a model or a tensor whose device has already been set.
    """
    if isinstance(device_object, torch.nn.Module):
        device = next(device_object.parameters()).device
    elif isinstance(device_object, torch.Tensor):
        device = device_object.device
    else:
        return tensor
    return tensor.to(device)
class TdnnAffine(torch.nn.Module):
""" An implemented tdnn affine component by conv1d
y = splice(w * x, context) + b
@input_dim: number of dims of frame <=> inputs channels of conv
@output_dim: number of layer nodes <=> outputs channels of conv
@context: a list of context
e.g. [-2,0,2]
If context is [0], then the TdnnAffine is equal to linear layer.
"""
def __init__(self, input_dim, output_dim, context=[0], bias=True, pad=
True, norm_w=False, norm_f=False, subsampling_factor=1):
super().__init__()
for index in range(0, len(context) - 1):
if context[index] >= context[index + 1]:
                raise ValueError(
                    'Context tuple {} is invalid: entries must be strictly increasing.'
                    .format(context))
self.input_dim = input_dim
self.output_dim = output_dim
self.context = context
self.bool_bias = bias
self.pad = pad
self.norm_w = norm_w
self.norm_f = norm_f
self.stride = subsampling_factor
self.left_context = context[0] if context[0] < 0 else 0
self.right_context = context[-1] if context[-1] > 0 else 0
self.tot_context = self.right_context - self.left_context + 1
if self.tot_context > 1 and self.norm_f:
self.norm_f = False
            # norm_f only makes sense for a single-frame context, so disable it.
kernel_size = self.tot_context,
self.weight = torch.nn.Parameter(torch.randn(output_dim, input_dim,
*kernel_size))
if self.bool_bias:
self.bias = torch.nn.Parameter(torch.randn(output_dim))
else:
self.register_parameter('bias', None)
self.init_weight()
if len(context) != self.tot_context:
self.mask = torch.tensor([[[(1 if index in context else 0) for
index in range(self.left_context, self.right_context + 1)]]])
else:
self.mask = None
self.selected_device = False
def init_weight(self):
torch.nn.init.normal_(self.weight, 0.0, 0.01)
if self.bias is not None:
torch.nn.init.constant_(self.bias, 0.0)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
if self.pad:
inputs = F.pad(inputs, (-self.left_context, self.right_context),
mode='constant', value=0)
assert inputs.shape[2] >= self.tot_context
if not self.selected_device and self.mask is not None:
self.mask = to_device(self, self.mask)
self.selected_device = True
filters = (self.weight * self.mask if self.mask is not None else
self.weight)
if self.norm_w:
filters = F.normalize(filters, dim=1)
if self.norm_f:
inputs = F.normalize(inputs, dim=1)
outputs = F.conv1d(inputs, filters, self.bias, self.stride, padding
=0, dilation=1, groups=1)
return outputs
def extra_repr(self):
return (
'{input_dim}, {output_dim}, context={context}, bias={bool_bias}, stride={stride}, pad={pad}, norm_w={norm_w}, norm_f={norm_f}'
.format(**self.__dict__))
@classmethod
def thop_count(self, m, x, y):
x = x[0]
kernel_ops = torch.zeros(m.weight.size()[2:]).numel()
bias_ops = 1 if m.bias is not None else 0
total_ops = y.nelement() * (m.input_dim * kernel_ops + bias_ops)
# ... truncated (>4000 chars) for memory efficiency |
MLPLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_1 => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class MLPLayer(nn.Module):
"""
Head for getting sentence representations over RoBERTa/BERT's CLS representation.
"""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, features, **kwargs):
x = self.dense(features)
x = self.activation(x)
return x
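# Hedged usage sketch (shapes assumed for illustration):
#   head = MLPLayer(_mock_config(hidden_size=4))
#   pooled = head(torch.rand(2, 4))  # -> (2, 4), values in (-1, 1)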
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
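# Fused epilogue for nn.Linear + Tanh: the matmul result is updated in place by
# adding the per-feature bias (x0 indexes the last dim of size 4) and applying
# tanh, avoiding a separate elementwise pass.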
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
class MLPLayerNew(nn.Module):
"""
Head for getting sentence representations over RoBERTa/BERT's CLS representation.
"""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, input_0):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| BDBC-KG-NLP/MixCSE_AAAI2022 | MLPLayer | false | 6,692 | [
"MIT"
] | 1 | 884145e24a5258c044fedb658df9999f012df875 | https://github.com/BDBC-KG-NLP/MixCSE_AAAI2022/tree/884145e24a5258c044fedb658df9999f012df875 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Model(nn.Module):
"""
Head for getting sentence representations over RoBERTa/BERT's CLS representation.
"""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, features, **kwargs):
x = self.dense(features)
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SEBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xm/cxmgtrbhybjrbk7bowlmvy5b6advoyijkz2cpryk6x6usy2m3kpe.py
# Topologically Sorted Source Nodes: [sum_1, truediv], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# sum_1 => sum_1
# truediv => div
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [2]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 4), kwargs = {})
triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ad/cadccuyhl7stcp3nyqfgohiwbiv5ckfzxsye27ithwsill6dvmh4.py
# Topologically Sorted Source Nodes: [outputs, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# outputs => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py
# Topologically Sorted Source Nodes: [outputs_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# outputs_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/oh/cohhtvmqxeiqmbgd3r3sllid3fwhv3kpuxtsefbrgz76govryk3x.py
# Topologically Sorted Source Nodes: [scale, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# scale => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, truediv], Original ATen: [aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sum_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [outputs], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 0), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 1), (1, 1, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [outputs, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 4, grid=grid(4), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [outputs_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 1), (4, 1, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [outputs_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf4, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scale, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_3.run(primals_1, buf4, buf5, 64, grid=grid(64), stream=stream0)
return (buf5, primals_1, primals_2, primals_4, reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), buf2, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn
def to_device(device_object, tensor):
"""
    Select a device for a non-parameter tensor w.r.t. a model or a tensor whose device has already been set.
    """
    if isinstance(device_object, torch.nn.Module):
        device = next(device_object.parameters()).device
    elif isinstance(device_object, torch.Tensor):
        device = device_object.device
    else:
        return tensor
    return tensor.to(device)
class TdnnAffine(torch.nn.Module):
""" An implemented tdnn affine component by conv1d
y = splice(w * x, context) + b
@input_dim: number of dims of frame <=> inputs channels of conv
@output_dim: number of layer nodes <=> outputs channels of conv
@context: a list of context
e.g. [-2,0,2]
If context is [0], then the TdnnAffine is equal to linear layer.
"""
def __init__(self, input_dim, output_dim, context=[0], bias=True, pad=
True, norm_w=False, norm_f=False, subsampling_factor=1):
super(TdnnAffine, self).__init__()
for index in range(0, len(context) - 1):
if context[index] >= context[index + 1]:
                raise ValueError(
                    'Context tuple {} is invalid: entries must be strictly increasing.'
                    .format(context))
self.input_dim = input_dim
self.output_dim = output_dim
self.context = context
self.bool_bias = bias
self.pad = pad
self.norm_w = norm_w
self.norm_f = norm_f
self.stride = subsampling_factor
self.left_context = context[0] if context[0] < 0 else 0
self.right_context = context[-1] if context[-1] > 0 else 0
self.tot_context = self.right_context - self.left_context + 1
if self.tot_context > 1 and self.norm_f:
self.norm_f = False
            # norm_f only makes sense for a single-frame context, so disable it.
kernel_size = self.tot_context,
self.weight = torch.nn.Parameter(torch.randn(output_dim, input_dim,
*kernel_size))
if self.bool_bias:
self.bias = torch.nn.Parameter(torch.randn(output_dim))
else:
self.register_parameter('bias', None)
self.init_weight()
if len(context) != self.tot_context:
self.mask = torch.tensor([[[(1 if index in context else 0) for
index in range(self.left_context, self.right_context + 1)]]])
else:
self.mask = None
self.selected_device = False
def init_weight(self):
torch.nn.init.normal_(self.weight, 0.0, 0.01)
if self.bias is not None:
torch.nn.init.constant_(self.bias, 0.0)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
if self.pad:
inputs = F.pad(inputs, (-self.left_context, self.right_context),
mode='constant', value=0)
assert inputs.shape[2] >= self.tot_context
if not self.selected_device and self.mask is not None:
self.mask = to_device(self, self.mask)
self.selected_device = True
filters = (self.weight * self.mask if self.mask is not None else
self.weight)
if self.norm_w:
filters = F.normalize(filters, dim=1)
if self.norm_f:
inputs = F.normalize(inputs, dim=1)
outputs = F.conv1d(inputs, filters, self.bias, self.stride, padding
=0, dilation=1, groups=1)
return outputs
def extra_repr(self):
return (
'{input_dim}, {output_dim}, context={context}, bias={bool_bias}, stride={stride}, pad={pad}, norm_w={norm_w}, norm_f={norm_f}'
.format(**self.__dict__))
@classmethod
    def thop_count(cls, m, x, y):
x = x[0]
kernel_ops = torch.zeros(m.weight.size()[2:]).numel()
bias_ops = 1 if m.bias is not None else 0
total_ops = y.nelement() * (m.input_dim * kernel_ops + bias_ops)
m.total_ops += torch.DoubleTensor([int(total_ops)])
class StatisticsPooling(torch.nn.Module):
""" An usual mean [+ stddev] poolling layer"""
def __init__(self, input_dim, stddev=True, unbiased=False, eps=1e-10):
super(StatisticsPooling, self).__init__()
self.stddev = stddev
self.input_dim = input_dim
if self.stddev:
self.output_dim = 2 * input_dim
else:
self.output_dim = input_dim
self.eps = eps
self.unbiased = unbiased
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
counts = inputs.shape[2]
mean = torch.unsqueeze(inputs.sum(dim=2) / counts, dim=2)
if self.stddev:
if self.unbiased and counts > 1:
counts = counts - 1
var = torch.sum((inputs - mean) ** 2, dim=2) / counts
std = torch.unsqueeze(torch.sqrt(var.clamp(min=self.eps)), dim=2)
return torch.cat((mean, std), dim=1)
else:
return mean
def get_output_dim(self):
return self.output_dim
def extra_repr(self):
return (
'{input_dim}, {output_dim}, stddev={stddev}, unbiased={unbiased}, eps={eps}'
.format(**self.__dict__))
@classmethod
    def thop_count(cls, m, x, y):
pass
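# Hedged usage sketch (shapes assumed for illustration): with stddev=True the
# pooled statistics stack mean and std along dim 1, doubling input_dim.
#   pool = StatisticsPooling(input_dim=4)       # output_dim == 8
#   stats = pool(torch.rand(2, 4, 100))         # -> (2, 8, 1)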
class SEBlock(torch.nn.Module):
""" A SE Block layer layer which can learn to use global information to selectively emphasise informative
features and suppress less useful ones.
This is a pytorch implementation of SE Block based on the paper:
Squeeze-and-Excitation Networks
by JFChou xmuspeech 2019-07-13
Snowdar xmuspeech 2020-04-28 [Check and update]
"""
def __init__(self, input_dim, ratio=4, inplace=True):
"""
@ratio: a reduction ratio which allows us to vary the capacity and computational cost of the SE blocks
in the network.
"""
super(SEBlock, self).__init__()
self.input_dim = input_dim
self.pooling = StatisticsPooling(input_dim, stddev=False)
self.fc_1 = TdnnAffine(input_dim, input_dim // ratio)
self.relu = torch.nn.ReLU(inplace=inplace)
self.fc_2 = TdnnAffine(input_dim // ratio, input_dim)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
x = self.pooling(inputs)
x = self.relu(self.fc_1(x))
scale = self.sigmoid(self.fc_2(x))
return inputs * scale
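# Hedged usage sketch: the block rescales its input channel-wise, so the output
# shape always matches the input shape.
#   se = SEBlock(input_dim=4, ratio=4)
#   out = se(torch.rand(2, 4, 50))              # -> (2, 4, 50)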
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.functional as F
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
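# Squeeze step of the SE block: averages the 4 frames per (batch, channel)
# position (sum of the four loads scaled by 0.25), i.e. the stddev-free
# StatisticsPooling mean.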
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tl.store(in_out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
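# Excitation/gating step: the kernel below applies sigmoid to the per-channel
# scores from the second FC (in_ptr1, one value per (batch, channel)) and
# broadcasts the gate over the 4 frames of the input (in_ptr0).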
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sum_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 4, 1
), (4, 1, 0), 0), primals_2, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf1, (4, 1, 1), (1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(4)](buf2, primals_3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 1), (4, 1, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(16)](buf4, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(64)](primals_1, buf4, buf5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf5, primals_1, primals_2, primals_4, reinterpret_tensor(buf0,
(4, 4, 1), (4, 1, 1), 0), buf2, buf4
def to_device(device_object, tensor):
"""
    Select a device for a non-parameter tensor w.r.t. a model or a tensor whose device has already been set.
    """
    if isinstance(device_object, torch.nn.Module):
        device = next(device_object.parameters()).device
    elif isinstance(device_object, torch.Tensor):
        device = device_object.device
    else:
        return tensor
    return tensor.to(device)
class TdnnAffine(torch.nn.Module):
""" An implemented tdnn affine component by conv1d
y = splice(w * x, context) + b
@input_dim: number of dims of frame <=> inputs channels of conv
@output_dim: number of layer nodes <=> outputs channels of conv
@context: a list of context
e.g. [-2,0,2]
If context is [0], then the TdnnAffine is equal to linear layer.
"""
def __init__(self, input_dim, output_dim, context=[0], bias=True, pad=
True, norm_w=False, norm_f=False, subsampling_factor=1):
super(TdnnAffine, self).__init__()
for index in range(0, len(context) - 1):
if context[index] >= context[index + 1]:
                raise ValueError(
                    'Context tuple {} is invalid: entries must be strictly increasing.'
                    .format(context))
self.input_dim = input_dim
self.output_dim = output_dim
self.context = context
self.bool_bias = bias
self.pad = pad
self.norm_w = norm_w
self.norm_f = norm_f
self.stride = subsampling_factor
self.left_context = context[0] if context[0] < 0 else 0
self.right_context = context[-1] if context[-1] > 0 else 0
self.tot_context = self.right_context - self.left_context + 1
if self.tot_context > 1 and self.norm_f:
self.norm_f = False
            # norm_f only makes sense for a single-frame context, so disable it.
kernel_size = self.tot_context,
self.weight = torch.nn.Parameter(torch.randn(output_dim, input_dim,
*kernel_size))
if self.bool_bias:
self.bias = torch.nn.Parameter(torch.randn(output_dim))
else:
self.register_parameter('bias', None)
self.init_weight()
if len(context) != self.tot_context:
self.mask = torch.tensor([[[(1 if index in context else 0) for
index in range(self.left_context, self.right_context + 1)]]])
else:
self.mask = None
self.selected_device = False
def init_weight(self):
torch.nn.init.normal_(self.weight, 0.0, 0.01)
if self.bias is not None:
torch.nn.init.constant_(self.bias, 0.0)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
if self.pad:
inputs = F.pad(inputs, (-self.left_context, self.right_context),
mode='constant', value=0)
assert inputs.shape[2] >= self.tot_context
if not self.selected_device and self.mask is not None:
self.mask = to_device(self, self.mask)
self.selected_device = True
filters = (self.weight * self.mask if self.mask is not None else
self.weight)
if self.norm_w:
filters = F.normalize(filters, dim=1)
if self.norm_f:
inputs = F.normalize(inputs, dim=1)
outputs = F.conv1d(inputs, filters, self.bias, self.stride, padding
=0, dilation=1, groups=1)
return outputs
def extra_repr(self):
return (
'{input_dim}, {output_dim}, context={context}, bias={bool_bias}, stride={stride}, pad={pad}, norm_w={norm_w}, norm_f={norm_f}'
.format(**self.__dict__))
@classmethod
    def thop_count(cls, m, x, y):
x = x[0]
kernel_ops = torch.zeros(m.weight.size()[2:]).numel()
bias_ops = 1 if m.bias is not None else 0
total_ops = y.nelement() * (m.input_dim * kernel_ops + bias_ops)
m.total_ops += torch.DoubleTensor([int(total_ops)])
class StatisticsPooling(torch.nn.Module):
""" An usual mean [+ stddev] poolling layer"""
def __init__(self, input_dim, stddev=True, unbiased=False, eps=1e-10):
super(StatisticsPooling, self).__init__()
self.stddev = stddev
self.input_dim = input_dim
if self.stddev:
self.output_dim = 2 * input_dim
else:
self.output_dim = input_dim
self.eps = eps
self.unbiased = unbiased
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
counts = inputs.shape[2]
mean = torch.unsqueeze(inputs.sum(dim=2) / counts, dim=2)
if self.stddev:
if self.unbiased and counts > 1:
counts = counts - 1
var = torch.sum((inputs - mean) ** 2, dim=2) / counts
std = torch.unsqueeze(torch.sqrt(var.clamp(min=self.eps)), dim=2)
return torch.cat((mean, std), dim=1)
else:
return mean
def get_output_dim(self):
return self.output_dim
def extra_repr(self):
return (
'{input_dim}, {output_dim}, stddev={stddev}, unbiased={unbiased}, eps={eps}'
.format(**self.__dict__))
@classmethod
    def thop_count(cls, m, x, y):
pass
class SEBlockNew(torch.nn.Module):
""" A SE Block layer layer which can learn to use global information to selectively emphasise informative
features and suppress less useful ones.
This is a pytorch implementation of SE Block based on the paper:
Squeeze-and-Excitation Networks
by JFChou xmuspeech 2019-07-13
Snowdar xmuspeech 2020-04-28 [Check and update]
"""
def __init__(self, input_dim, ratio=4, inplace=True):
"""
@ratio: a reduction ratio which allows us to vary the capacity and computational cost of the SE blocks
in the network.
"""
super(SEBlockNew, self).__init__()
self.input_dim = input_dim
self.pooling = StatisticsPooling(input_dim, stddev=False)
self.fc_1 = TdnnAffine(input_dim, input_dim // ratio)
self.relu = torch.nn.ReLU(inplace=inplace)
self.fc_2 = TdnnAffine(input_dim // ratio, input_dim)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.fc_1.weight
primals_3 = self.fc_1.bias
primals_4 = self.fc_2.weight
primals_5 = self.fc_2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| fancyliumeng/asv-subtools | SEBlock | false | 6,693 | [
"Apache-2.0"
] | 1 | 56a13484472e7ae6eb00d762c00d57e581e78eb4 | https://github.com/fancyliumeng/asv-subtools/tree/56a13484472e7ae6eb00d762c00d57e581e78eb4 | import torch
import torch.nn.functional as F
import torch.nn
def to_device(device_object, tensor):
"""
    Select a device for a non-parameter tensor w.r.t. a model or a tensor whose device has already been set.
    """
    if isinstance(device_object, torch.nn.Module):
        device = next(device_object.parameters()).device
    elif isinstance(device_object, torch.Tensor):
        device = device_object.device
    else:
        return tensor
    return tensor.to(device)
class TdnnAffine(torch.nn.Module):
""" An implemented tdnn affine component by conv1d
y = splice(w * x, context) + b
@input_dim: number of dims of frame <=> inputs channels of conv
@output_dim: number of layer nodes <=> outputs channels of conv
@context: a list of context
e.g. [-2,0,2]
If context is [0], then the TdnnAffine is equal to linear layer.
"""
def __init__(self, input_dim, output_dim, context=[0], bias=True, pad=
True, norm_w=False, norm_f=False, subsampling_factor=1):
super().__init__()
for index in range(0, len(context) - 1):
if context[index] >= context[index + 1]:
                raise ValueError(
                    'Context tuple {} is invalid: entries must be strictly increasing.'
                    .format(context))
self.input_dim = input_dim
self.output_dim = output_dim
self.context = context
self.bool_bias = bias
self.pad = pad
self.norm_w = norm_w
self.norm_f = norm_f
self.stride = subsampling_factor
self.left_context = context[0] if context[0] < 0 else 0
self.right_context = context[-1] if context[-1] > 0 else 0
self.tot_context = self.right_context - self.left_context + 1
if self.tot_context > 1 and self.norm_f:
self.norm_f = False
            # norm_f only makes sense for a single-frame context, so disable it.
kernel_size = self.tot_context,
self.weight = torch.nn.Parameter(torch.randn(output_dim, input_dim,
*kernel_size))
if self.bool_bias:
self.bias = torch.nn.Parameter(torch.randn(output_dim))
else:
self.register_parameter('bias', None)
self.init_weight()
if len(context) != self.tot_context:
self.mask = torch.tensor([[[(1 if index in context else 0) for
index in range(self.left_context, self.right_context + 1)]]])
else:
self.mask = None
self.selected_device = False
def init_weight(self):
torch.nn.init.normal_(self.weight, 0.0, 0.01)
if self.bias is not None:
torch.nn.init.constant_(self.bias, 0.0)
def forward(self, inputs):
"""
@inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
"""
assert len(inputs.shape) == 3
assert inputs.shape[1] == self.input_dim
if self.pad:
inputs = F.pad(inputs, (-self.left_context, self.right_context),
mode='constant', value=0)
assert inputs.shape[2] >= self.tot_context
if not self.selected_device and self.mask is not None:
self.mask = to_device(self, self.mask)
self.selected_device = True
filters = (self.weight * self.mask if self.mask is not None else
self.weight)
if self.norm_w:
filters = F.normalize(filters, dim=1)
if self.norm_f:
inputs = F.normalize(inputs, dim=1)
outputs = F.conv1d(inputs, filters, self.bias, self.stride, padding
=0, dilation=1, groups=1)
return outputs
def extra_repr(self):
return (
'{input_dim}, {output_dim}, context={context}, bias={bool_bias}, stride={stride}, pad={pad}, norm_w={norm_w}, norm_f={norm_f}'
.format(**self.__dict__))
@classmethod
    def thop_count(cls, m, x, y):
x = x[0]
kernel_ops = torch.zeros(m.weight.size()[2:]).numel()
bias_ops = 1 if m.bias is not None else 0
total_ops = y.nelement() * (m.input_dim * kernel_ops + bias_ops)
# ... truncated (>4000 chars) for memory efficiency |
MultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5w/c5wnubyijcgstpnbhnht5ommr737mwfx67lgpfc6mvwlwmhzfkmq.py
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# q_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
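# Note: the kernel above fuses the query projection's bias add with the
# attention scaling q * head_dim ** -0.5; the constant 1.0 is consistent with
# head_dim == 1 (embed_dim 4 split across 4 heads), though the head count is
# an inference from the generated code rather than something stated here.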
# kernel path: runs/run_shard_4/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
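# Note: softmax is split into two pointwise passes: the kernel above computes
# exp(x - rowmax), re-reading the 4 scores of each row to form the max, and the
# kernel below divides by the row sum of those exponentials.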
# kernel path: runs/run_shard_4/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
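# The two softmax kernels above implement the usual numerically stable two-pass
# scheme: the first subtracts the row max before exponentiating, the second
# normalizes by the row sum. A minimal eager-mode reference (an illustrative
# sketch, not part of the generated module):
def _softmax_reference(x):
    # matches F.softmax(x, dim=-1): max-shift for stability, then exp / sum
    row_max = x.max(dim=-1, keepdim=True).values
    exp = (x - row_max).exp()
    return exp / exp.sum(dim=-1, keepdim=True)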
# kernel path: runs/run_shard_4/inductor_cache/um/cumdt56px4jhgi4x7ers5m2jlyr4stfdyfhyb47o43khr5qzdg6f.py
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_3 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
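# The clone kernel above materializes a permuted view contiguously so the output
# projection can treat it as a plain (16, 4) matrix. An eager-mode sketch
# (illustrative; `attn` is the (bsz*heads, tgt_len, head_dim) bmm output):
def _clone_reference(attn):
    # same effect as the clone of %permute_8 in the graph fragment above
    return attn.transpose(0, 1).contiguous()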
# kernel path: runs/run_shard_4/inductor_cache/ja/cjapoylwc442ttbiaridhuezkvo2e7durqwl2lytr7aq6togjqw3.py
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# attn_weights_4 => div_1
# sum_1 => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_17, [1]), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {})
triton_poi_fused_div_sum_4 = async_compile.triton('triton_poi_fused_div_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
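# This kernel averages the attention weights over the head dimension, matching
# `attn_weights.view(bsz, num_heads, tgt_len, src_len).sum(dim=1) / num_heads`
# in the eager module. A sketch with the concrete sizes of this trace
# (bsz=4, num_heads=4, tgt_len=src_len=4):
def _avg_heads_reference(attn_weights):
    return attn_weights.view(4, 4, 4, 4).sum(dim=1) / 4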
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf5
buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 1), (1, 16, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf7, buf8, 4, 16, grid=grid(4, 16), stream=stream0)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_7
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_4.run(buf6, buf10, 64, grid=grid(64), stream=stream0)
return (reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 0), )
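# Note on the extern calls above: addmm(bias, x, W) computes bias + x @ W with
# alpha = beta = 1, i.e. a fused linear layer. A hedged reading of buf1, for
# orientation only (the reinterpret_tensor strides slice the packed projection):
#   k = key.reshape(16, 4) @ in_proj_weight[4:8].t() + in_proj_bias[4:8]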
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.utils.data
import torch.distributed
import torch.nn as nn
from torch.nn import Parameter
import torch.optim
import torch.optim.lr_scheduler
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self._mask = None
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
def forward(self, query, key, value, mask_future_timesteps=False,
key_padding_mask=None, incremental_state=None, need_weights=True,
static_kv=False):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Future timesteps can be masked with the
`mask_future_timesteps` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if 'prev_key' in saved_state:
if static_kv:
assert kv_same and not qkv_same
key = value = None
else:
saved_state = None
if qkv_same:
q, k, v = self.in_proj_qkv(query)
elif kv_same:
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = q.new(0)
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q *= self.scaling
if saved_state is not None:
if 'prev_key' in saved_state:
k = torch.cat((saved_state['prev_key'], k), dim=0)
if 'prev_value' in saved_state:
v = torch.cat((saved_state['prev_value'], v), dim=0)
saved_state['prev_key'] = k
saved_state['prev_value'] = v
self._set_input_buffer(incremental_state, saved_state)
src_len = k.size(0)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
k = k.contiguous().view(src_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
v = v.contiguous().view(src_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if mask_future_timesteps and incremental_state is None:
assert query.size() == key.size(
), 'mask_future_timesteps only applies to self-attention'
attn_weights += self.buffered_mask(attn_weights).unsqueeze(0)
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
attn_weights = attn_weights.float().masked_fill(key_padding_mask
.unsqueeze(1).unsqueeze(2), float('-inf')).type_as(attn_weights
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len,
src_len)
attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(
attn_weights)
attn_weights = F.dropout(attn_weights, p=self.dropout, training=
self.training)
attn = torch.bmm(attn_weights, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
if need_weights:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len,
src_len)
attn_weights = attn_weights.sum(dim=1) / self.num_heads
else:
attn_weights = None
return attn, attn_weights
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query):
return self._in_proj(query, end=self.embed_dim)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
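# NOTE: `utils` used below (fill_with_neg_inf, get_incremental_state,
# set_incremental_state) refers to fairseq's utils module; it is not imported
# in this snippet, so these helpers would raise NameError at call time unless
# `from fairseq import utils` is added.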
def buffered_mask(self, tensor):
dim = tensor.size(-1)
if self._mask is None:
self._mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim,
dim)), 1)
if self._mask.size(0) < dim:
self._mask = torch.triu(utils.fill_with_neg_inf(self._mask.
resize_(dim, dim)), 1)
return self._mask[:dim, :dim]
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer[k] = input_buffer[k].index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state,
'attn_state') or {}
def _set_input_buffer(self, incremental_state, buffer):
utils.set_incremental_state(self, incremental_state, 'attn_state',
buffer)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4}]
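# Hypothetical usage sketch (shapes follow get_inputs above; self-attention):
def example_self_attention():
    mha = MultiheadAttention(embed_dim=4, num_heads=4)
    x = torch.rand(4, 4, 4)  # (time, batch, channel)
    attn, attn_weights = mha(x, x, x)
    return attn, attn_weights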
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
import torch.utils.data
import torch.distributed
import torch.nn as nn
from torch.nn import Parameter
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_div_sum_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1,
beta=1, out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1,
beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0),
0), reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 0), out=buf4)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (16, 4, 1), (1,
16, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 16)](buf7, buf8, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_7, reinterpret_tensor(buf8, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf9)
del primals_7
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_sum_4[grid(64)](buf6, buf10, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0
), buf10, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0
), primals_6, reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0
), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0
), reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 1), 0)
class MultiheadAttentionNew(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self._mask = None
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query):
return self._in_proj(query, end=self.embed_dim)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def buffered_mask(self, tensor):
dim = tensor.size(-1)
if self._mask is None:
self._mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim,
dim)), 1)
if self._mask.size(0) < dim:
self._mask = torch.triu(utils.fill_with_neg_inf(self._mask.
resize_(dim, dim)), 1)
return self._mask[:dim, :dim]
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer[k] = input_buffer[k].index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state,
'attn_state') or {}
def _set_input_buffer(self, incremental_state, buffer):
utils.set_incremental_state(self, incremental_state, 'attn_state',
buffer)
def forward(self, input_0, input_1, input_2):
primals_4 = self.in_proj_weight
primals_5 = self.in_proj_bias
primals_6 = self.out_proj.weight
primals_7 = self.out_proj.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
| farhanazareen/Fairseq | MultiheadAttention | false | 6,694 | [
"BSD-3-Clause"
] | 1 | 39c7b6804b4a3426ef23b30f3ca8a3c0a9948079 | https://github.com/farhanazareen/Fairseq/tree/39c7b6804b4a3426ef23b30f3ca8a3c0a9948079 | import torch
import torch.nn.functional as F
import torch.utils.data
import torch.distributed
import torch.nn as nn
from torch.nn import Parameter
import torch.optim
import torch.optim.lr_scheduler
class Model(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self._mask = None
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
def forward(self, query, key, value, mask_future_timesteps=False,
key_padding_mask=None, incremental_state=None, need_weights=True,
static_kv=False):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Future timesteps can be masked with the
`mask_future_timesteps` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if 'prev_key' in saved_state:
if static_kv:
assert kv_same and not qkv_same
key = value = None
else:
saved_state = None
if qkv_same:
q, k, v = self.in_proj_qkv(query)
elif kv_same:
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = q.new(0)
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q *= self.scaling
if saved_state is not None:
if 'prev_key' in saved_state:
k = torch.cat((saved_state['prev_key'], k), dim=0)
if 'prev_value' in saved_state:
v = torch.cat((saved_state['prev_value'], v), dim=0)
saved_state['prev_key'] = k
saved_state['prev_value'] = v
self._set_input_buffer(incremental_state, saved_state)
src_len = k.size(0)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
k = k.contiguous().view(src_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
v = v.contiguous().view(src_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
attn_weights = torch.bmm(q, k.tra
# ... truncated (>4000 chars) for memory efficiency |
DPLSTMCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cm/ccmo7fnacrzz2bpb3kjhjwznydxc2ydumd6dycmnumtbvfyp23ld.py
# Topologically Sorted Source Nodes: [i_t, f_t, g_t, o_t, mul, mul_1, c_t, tanh_1, h_t], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add, aten.sigmoid_backward]
# Source node to ATen node mapping:
# c_t => add_1
# f_t => sigmoid_1
# g_t => tanh
# h_t => mul_2
# i_t => sigmoid
# mul => mul
# mul_1 => mul_1
# o_t => sigmoid_2
# tanh_1 => tanh_1
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem,), kwargs = {})
# %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_2,), kwargs = {})
# %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %primals_7), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_2, %tanh_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub_3), kwargs = {})
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr4 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp16 + tmp17
tmp21 = tmp19 + tmp20
tmp22 = tmp18 + tmp21
tmp23 = libdevice.tanh(tmp22)
tmp26 = tmp24 + tmp25
tmp29 = tmp27 + tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.sigmoid(tmp30)
tmp33 = tmp31 * tmp32
tmp34 = tmp7 * tmp23
tmp35 = tmp33 + tmp34
tmp36 = 1.0
tmp37 = tmp36 - tmp31
tmp38 = tmp31 * tmp37
tmp39 = libdevice.tanh(tmp35)
tmp40 = tmp15 * tmp39
tl.store(out_ptr0 + (x2), tmp7, xmask)
tl.store(out_ptr1 + (x2), tmp15, xmask)
tl.store(out_ptr2 + (x2), tmp23, xmask)
tl.store(out_ptr3 + (x2), tmp35, xmask)
tl.store(out_ptr4 + (x2), tmp38, xmask)
tl.store(out_ptr5 + (x2), tmp40, xmask)
''', device_str='cuda')
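# Eager-mode reference for the fused kernel above (a sketch following the
# standard LSTM cell equations encoded in the graph fragment; hidden_size=4):
def _lstm_step_reference(gates, c_prev):
    i_in, f_in, g_in, o_in = torch.split(gates, 4, dim=1)
    i_t, f_t, o_t = torch.sigmoid(i_in), torch.sigmoid(f_in), torch.sigmoid(o_in)
    g_t = torch.tanh(g_in)
    c_t = f_t * c_prev + i_t * g_t
    h_t = o_t * torch.tanh(c_t)
    return h_t, c_t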
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_6, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [i_t, f_t, g_t, o_t, mul, mul_1, c_t, tanh_1, h_t], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add, aten.sigmoid_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0.run(buf0, primals_2, buf1, primals_5, primals_7, buf2, buf4, buf3, buf5, buf7, buf6, 16, grid=grid(16), stream=stream0)
del buf0
del buf1
del primals_2
del primals_5
return (buf6, buf5, primals_3, primals_6, primals_7, buf2, buf3, buf4, buf5, buf7, )
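# Note: besides h_t (buf6) and c_t (buf5), the kernel also returns the gate
# activations and f_t * (1 - f_t) (buf7), precomputing the forget-gate sigmoid
# backward for autograd (the %mul_14 node in the graph fragment).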
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from typing import Optional
from typing import Tuple
class LSTMLinear(nn.Linear):
"""
This class is the same as an nn.Linear layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear)
"""
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPLSTMCell(nn.Module):
"""
Internal-only class. Implements *one* step of LSTM so that an LSTM layer can be seen as repeated
applications of this class.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = LSTMLinear(input_size, 4 * hidden_size, bias=self.bias)
self.hh = LSTMLinear(hidden_size, 4 * hidden_size, bias=self.bias)
self.reset_parameters()
def reset_parameters(self):
"""
Resets parameters by initializing them from a uniform distribution.
"""
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
"""
Sets max batch length
"""
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
def forward(self, x: 'torch.Tensor', h_prev: 'torch.Tensor', c_prev:
'torch.Tensor', batch_size_t: 'Optional[int]'=None) ->Tuple[torch.
Tensor, torch.Tensor]:
if batch_size_t is None:
gates = self.ih(x) + self.hh(h_prev)
else:
gates = self.ih(x) + self.hh(h_prev[:batch_size_t, :])
i_t_input, f_t_input, g_t_input, o_t_input = torch.split(gates,
self.hidden_size, 1)
i_t = torch.sigmoid(i_t_input)
f_t = torch.sigmoid(f_t_input)
g_t = torch.tanh(g_t_input)
o_t = torch.sigmoid(o_t_input)
if batch_size_t is None:
c_t = f_t * c_prev + i_t * g_t
else:
c_t = f_t * c_prev[:batch_size_t, :] + i_t * g_t
h_t = o_t * torch.tanh(c_t)
return h_t, c_t
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'bias': 4}]
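# Hypothetical usage sketch (shapes follow get_inputs above):
def example_step():
    cell = DPLSTMCell(input_size=4, hidden_size=4, bias=True)
    x, h_prev, c_prev = torch.rand(4, 4), torch.zeros(4, 4), torch.zeros(4, 4)
    h_t, c_t = cell(x, h_prev, c_prev)
    return h_t, c_t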
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2,
out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp16 + tmp17
tmp21 = tmp19 + tmp20
tmp22 = tmp18 + tmp21
tmp23 = libdevice.tanh(tmp22)
tmp26 = tmp24 + tmp25
tmp29 = tmp27 + tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.sigmoid(tmp30)
tmp33 = tmp31 * tmp32
tmp34 = tmp7 * tmp23
tmp35 = tmp33 + tmp34
tmp36 = 1.0
tmp37 = tmp36 - tmp31
tmp38 = tmp31 * tmp37
tmp39 = libdevice.tanh(tmp35)
tmp40 = tmp15 * tmp39
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
tl.store(out_ptr2 + x2, tmp23, xmask)
tl.store(out_ptr3 + x2, tmp35, xmask)
tl.store(out_ptr4 + x2, tmp38, xmask)
tl.store(out_ptr5 + x2, tmp40, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 16),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(primals_6, reinterpret_tensor(primals_4, (4, 16),
(1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_0[grid(16)](buf0
, primals_2, buf1, primals_5, primals_7, buf2, buf4, buf3, buf5,
buf7, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf0
del buf1
del primals_2
del primals_5
return (buf6, buf5, primals_3, primals_6, primals_7, buf2, buf3, buf4,
buf5, buf7)
class LSTMLinear(nn.Linear):
"""
This class is the same as an nn.Linear layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear)
"""
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPLSTMCellNew(nn.Module):
"""
Internal-only class. Implements *one* step of LSTM so that an LSTM layer can be seen as repeated
applications of this class.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = LSTMLinear(input_size, 4 * hidden_size, bias=self.bias)
self.hh = LSTMLinear(hidden_size, 4 * hidden_size, bias=self.bias)
self.reset_parameters()
def reset_parameters(self):
"""
Resets parameters by initializing them from a uniform distribution.
"""
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
"""
Sets max batch length
"""
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
def forward(self, input_0, input_1, input_2):
primals_1 = self.ih.weight
primals_2 = self.ih.bias
primals_4 = self.hh.weight
primals_5 = self.hh.bias
primals_3 = input_0
primals_6 = input_1
primals_7 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
| ffuuugor/opacus | DPLSTMCell | false | 6,695 | [
"Apache-2.0"
] | 1 | 2048a6e92902685c2a735e9fb7c0d48b4846b494 | https://github.com/ffuuugor/opacus/tree/2048a6e92902685c2a735e9fb7c0d48b4846b494 | import math
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from typing import Optional
from typing import Tuple
class LSTMLinear(nn.Linear):
"""
This class is the same as an nn.Linear layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear)
"""
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class Model(nn.Module):
"""
Internal-only class. Implements *one* step of LSTM so that an LSTM layer can be seen as repeated
applications of this class.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = LSTMLinear(input_size, 4 * hidden_size, bias=self.bias)
self.hh = LSTMLinear(hidden_size, 4 * hidden_size, bias=self.bias)
self.reset_parameters()
def reset_parameters(self):
"""
Resets parameters by initializing them from a uniform distribution.
"""
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
"""
Sets max batch length
"""
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
def forward(self, x: 'torch.Tensor', h_prev: 'torch.Tensor', c_prev:
'torch.Tensor', batch_size_t: 'Optional[int]'=None) ->Tuple[torch.
Tensor, torch.Tensor]:
if batch_size_t is None:
gates = self.ih(x) + self.hh(h_prev)
else:
gates = self.ih(x) + self.hh(h_prev[:batch_size_t, :])
i_t_input, f_t_input, g_t_input, o_t_input = torch.split(gates,
self.hidden_size, 1)
i_t = torch.sigmoid(i_t_input)
f_t = torch.sigmoid(f_t_input)
g_t = torch.tanh(g_t_input)
o_t = torch.sigmoid(o_t_input)
if batch_size_t is None:
c_t = f_t * c_prev + i_t * g_t
else:
c_t = f_t * c_prev[:batch_size_t, :] + i_t * g_t
h_t = o_t * torch.tanh(c_t)
return h_t, c_t
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
ResidualBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/na/cnaws7weknfxcnb3ygsfv2nxagp6zlaj36yxscvqrdlipjrkrt2j.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# out => convolution
# out_1 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
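# Eager-mode reference for the fused bias + LeakyReLU epilogue above (a sketch;
# the 0.01 factor is the hard-coded negative slope):
def _leaky_relu_reference(y):
    # y is the convolution output with the bias already added
    return torch.where(y > 0, y, 0.01 * y)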
# kernel path: runs/run_shard_4/inductor_cache/z2/cz243er3otqhcd4beubsfv6h67mhvzaqoiltyiby3d47o2gckjkl.py
# Topologically Sorted Source Nodes: [out_2, out_3, out_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.add, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# out_2 => convolution_1
# out_3 => gt_1, mul_1, where_1
# out_4 => add
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.01), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, %primals_1), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_1, 0), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp9, xmask)
tl.store(out_ptr1 + (x3), tmp10, xmask)
''', device_str='cuda')
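# The kernel above fuses three steps: out = leaky_relu(conv2_out + bias) +
# identity, plus the boolean mask leaky_relu(.) > 0 that the backward pass
# reuses for leaky_relu_backward. Eager-mode sketch (hypothetical helper):
def _ref_residual_tail(conv_out, bias, identity):
    # Bias add, LeakyReLU(0.01), residual add; also return the backward mask.
    act = torch.nn.functional.leaky_relu(conv_out + bias.view(1, -1, 1, 1), 0.01)
    return act + identity, act > 0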
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_2, out_3, out_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.add, aten.leaky_relu_backward]
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1.run(buf2, primals_5, primals_1, buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf2
del primals_5
return (buf3, primals_1, primals_2, primals_4, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def conv3x3(in_ch, out_ch, stride=1):
"""3x3 convolution with padding."""
return nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1)
class ResidualBlock(nn.Module):
"""Simple residual block with two 3x3 convolutions.
Args:
in_ch (int): number of input channels
out_ch (int): number of output channels
"""
def __init__(self, in_ch, out_ch):
super().__init__()
self.conv1 = conv3x3(in_ch, out_ch)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.conv2 = conv3x3(out_ch, out_ch)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.leaky_relu(out)
out = self.conv2(out)
out = self.leaky_relu(out)
out = out + identity
return out
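# Minimal usage sketch (shapes illustrative):
#   block = ResidualBlock(in_ch=4, out_ch=4)
#   y = block(torch.rand(4, 4, 4, 4))  # residual output, same shape as input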
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'out_ch': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp9, xmask)
tl.store(out_ptr1 + x3, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf1,
primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1[grid
(256)](buf2, primals_5, primals_1, buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf2
del primals_5
return buf3, primals_1, primals_2, primals_4, buf1, buf4
def conv3x3(in_ch, out_ch, stride=1):
"""3x3 convolution with padding."""
return nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1)
class ResidualBlockNew(nn.Module):
"""Simple residual block with two 3x3 convolutions.
Args:
in_ch (int): number of input channels
out_ch (int): number of output channels
"""
def __init__(self, in_ch, out_ch):
super().__init__()
self.conv1 = conv3x3(in_ch, out_ch)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.conv2 = conv3x3(out_ch, out_ch)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
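# Note: call() returns the block output first, followed by the tensors saved
# for the backward pass; forward exposes only output[0].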
| fqhank/HESIC | ResidualBlock | false | 6,696 | [
"Apache-2.0"
] | 1 | f15cb8e6822af45f0022ea4887fce915e250ed75 | https://github.com/fqhank/HESIC/tree/f15cb8e6822af45f0022ea4887fce915e250ed75 | import torch
import torch.nn as nn
def conv3x3(in_ch, out_ch, stride=1):
"""3x3 convolution with padding."""
return nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1)
class Model(nn.Module):
"""Simple residual block with two 3x3 convolutions.
Args:
in_ch (int): number of input channels
out_ch (int): number of output channels
"""
def __init__(self, in_ch, out_ch):
super().__init__()
self.conv1 = conv3x3(in_ch, out_ch)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.conv2 = conv3x3(out_ch, out_ch)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.leaky_relu(out)
out = self.conv2(out)
out = self.leaky_relu(out)
out = out + identity
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
AdaIN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qe/cqeosomkjl2wyhgautbj64zvn2r6slesq4xiplyeynuljwzp4fnz.py
# Topologically Sorted Source Nodes: [add, instance_norm, mul, add_1], Original ATen: [aten.add, aten._native_batch_norm_legit, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_2
# instance_norm => add_1, rsqrt, var_mean
# mul => mul_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %view_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %getitem_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_mul_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr1 + (4 + x2 + (8*x3)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = 1.0
tmp26 = tmp24 + tmp25
tmp27 = tmp0 - tmp10
tmp28 = tmp27 * tmp21
tmp29 = tmp26 * tmp28
tmp32 = tmp30 + tmp31
tmp33 = tmp29 + tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (16*x0)), tmp33, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, instance_norm, mul, add_1], Original ATen: [aten.add, aten._native_batch_norm_legit, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0.run(buf4, primals_4, buf0, primals_2, buf1, buf5, 16, 16, grid=grid(16), stream=stream0)
del buf0
del primals_2
return (buf5, primals_3, primals_4, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AdaIN(nn.Module):
def __init__(self, style_dim, num_features):
super().__init__()
self.norm = nn.InstanceNorm2d(num_features, affine=False)
self.fc = nn.Linear(style_dim, num_features * 2)
def forward(self, x, s):
h = self.fc(s)
h = h.view(h.size(0), h.size(1), 1, 1)
gamma, beta = torch.chunk(h, chunks=2, dim=1)
return (1 + gamma) * self.norm(x) + beta
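# The forward pass above is adaptive instance normalization: with per-(N, C)
# spatial statistics mu and var (InstanceNorm2d, eps=1e-5, affine=False), it
# computes (1 + gamma) * (x - mu) / sqrt(var + eps) + beta, where gamma and
# beta are the two halves of the style projection fc(s).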
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'style_dim': 4, 'num_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_mul_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr1 + (4 + x2 + 8 * x3), xmask, eviction_policy=
'evict_last')
tmp31 = tl.load(in_ptr2 + (4 + x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tmp24 = tmp22 + tmp23
tmp25 = 1.0
tmp26 = tmp24 + tmp25
tmp27 = tmp0 - tmp10
tmp28 = tmp27 * tmp21
tmp29 = tmp26 * tmp28
tmp32 = tmp30 + tmp31
tmp33 = tmp29 + tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp33, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
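# The kernel above computes, per (N, C) instance, mean = sum(x) / 16 over the
# 4x4 spatial positions and inv_std = rsqrt(var + 1e-05), then applies
# (1 + gamma) * (x - mean) * inv_std + beta in a single pass. Reference sketch
# (hypothetical helper, float32 NCHW assumed):
def _ref_adain(x, gamma, beta, eps=1e-05):
    # Per-(N, C) spatial statistics, biased variance to match the kernel.
    mu = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), keepdim=True, unbiased=False)
    return (1 + gamma) * (x - mu) * torch.rsqrt(var + eps) + beta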
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 8),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf4 = reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_add_mul_0[grid(16)](buf4,
primals_4, buf0, primals_2, buf1, buf5, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, primals_4, buf1, buf4
class AdaINNew(nn.Module):
def __init__(self, style_dim, num_features):
super().__init__()
self.norm = nn.InstanceNorm2d(num_features, affine=False)
self.fc = nn.Linear(style_dim, num_features * 2)
def forward(self, input_0, input_1):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_4 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
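# Usage matches AdaIN: AdaINNew(style_dim, num_features)(x, s), with feature
# map x of shape (N, C, H, W) and style code s of shape (N, style_dim).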
| fpaupier/stargan-v2 | AdaIN | false | 6,697 | [
"MIT"
] | 1 | 18d2e04ed6e6df963b84345e798d94383757aaa2 | https://github.com/fpaupier/stargan-v2/tree/18d2e04ed6e6df963b84345e798d94383757aaa2 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, style_dim, num_features):
super().__init__()
self.norm = nn.InstanceNorm2d(num_features, affine=False)
self.fc = nn.Linear(style_dim, num_features * 2)
def forward(self, x, s):
h = self.fc(s)
h = h.view(h.size(0), h.size(1), 1, 1)
gamma, beta = torch.chunk(h, chunks=2, dim=1)
return (1 + gamma) * self.norm(x) + beta
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
SelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [g], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# g => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ow/cowknkiz5k2g6ztnk6tbibjnrz3filkhxucrp3a2phbhxhq6vwgy.py
# Topologically Sorted Source Nodes: [aff_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# aff_1 => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mm, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
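# The kernel above is the numerically stable row softmax:
# softmax(z)_j = exp(z_j - max(z)) / sum_k exp(z_k - max(z));
# subtracting the row max keeps exp() in range without changing the result.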
# kernel path: runs/run_shard_4/inductor_cache/c3/cc33uengbbrpbc2oxl2uomdjuzv6zahbzvvxfrdoulgb5c2nlpg3.py
# Topologically Sorted Source Nodes: [o, mul, mul_1, y], Original ATen: [aten.cat, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# o => cat
# y => add
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_3, %view_4, %view_5, %view_6],), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%cat, 4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, -3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_cat_mul_2 = async_compile.triton('triton_poi_fused_add_cat_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cat_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y1 = (yindex // 16)
x2 = xindex
y0 = yindex % 16
tmp25 = tl.load(in_ptr4 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp0 = y1
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x2 + (4*y0)), tmp4 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1, 1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x2 + (4*y0)), tmp9 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1, 1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x2 + (4*y0)), tmp14 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1, 1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + (x2 + (4*y0)), tmp16 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tmp23 = 4.0
tmp24 = tmp22 * tmp23
tmp26 = -3.0
tmp27 = tmp25 * tmp26
tmp28 = tmp24 + tmp27
tl.store(out_ptr0 + (y0 + (16*x2) + (64*y1)), tmp28, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [f], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [g], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [g], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [f], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf4, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [aff], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (1, 16), 0), reinterpret_tensor(buf4, (4, 16), (16, 1), 0), out=buf5)
buf8 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [aff_1], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf5, buf8, 16, 16, grid=grid(16), stream=stream0)
buf9 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf9, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cur_o], Original ATen: [aten.mm]
extern_kernels.mm(buf8, reinterpret_tensor(buf9, (16, 4), (1, 16), 0), out=buf10)
buf11 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [aff_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (1, 16), 64), reinterpret_tensor(buf4, (4, 16), (16, 1), 64), out=buf11)
buf14 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [aff_3], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf11, buf14, 16, 16, grid=grid(16), stream=stream0)
buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cur_o_3], Original ATen: [aten.mm]
extern_kernels.mm(buf14, reinterpret_tensor(buf9, (16, 4), (1, 16), 64), out=buf15)
buf16 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [aff_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (1, 16), 128), reinterpret_tensor(buf4, (4, 16), (16, 1), 128), out=buf16)
buf19 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [aff_5], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf16, buf19, 16, 16, grid=grid(16), stream=stream0)
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cur_o_6], Original ATen: [aten.mm]
extern_kernels.mm(buf19, reinterpret_tensor(buf9, (16, 4), (1, 16), 128), out=buf20)
buf21 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [aff_6], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (1, 16), 192), reinterpret_tensor(buf4, (4, 16), (16, 1), 192), out=buf21)
buf24 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [aff_7], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf21, buf24, 16, 16, grid=grid(16), stream=stream0)
buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cur_o_9], Original ATen: [aten.mm]
extern_kernels.mm(buf24, reinterpret_tensor(buf9, (16, 4), (1, 16), 192), out=buf25)
buf26 = reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf21 # reuse
# Topologically Sorted Source Nodes: [o, mul, mul_1, y], Original ATen: [aten.cat, aten.mul, aten.add]
triton_poi_fused_add_cat_mul_2.run(buf10, buf15, buf20, buf25, primals_3, buf26, 64, 4, grid=grid(64, 4), stream=stream0)
del buf10
del buf15
del buf20
del buf25
return (buf26, primals_1, primals_3, primals_4, primals_6, reinterpret_tensor(buf4, (4, 16), (16, 1), 0), buf8, reinterpret_tensor(buf4, (4, 16), (16, 1), 64), buf14, reinterpret_tensor(buf4, (4, 16), (16, 1), 128), buf19, reinterpret_tensor(buf4, (4, 16), (16, 1), 192), buf24, reinterpret_tensor(buf9, (4, 16), (16, 1), 192), reinterpret_tensor(buf3, (4, 16), (16, 1), 192), reinterpret_tensor(buf9, (4, 16), (16, 1), 128), reinterpret_tensor(buf3, (4, 16), (16, 1), 128), reinterpret_tensor(buf9, (4, 16), (16, 1), 64), reinterpret_tensor(buf3, (4, 16), (16, 1), 64), reinterpret_tensor(buf9, (4, 16), (16, 1), 0), reinterpret_tensor(buf3, (4, 16), (16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
from torch.nn import init
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
            if init_type == 'gaussian':
                init.normal_(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, 'Unsupported initialization: {}'.format(init_type)
        if hasattr(m, 'bias') and m.bias is not None:
            init.constant_(m.bias.data, 0.0)
return init_fun
class SelfAttention(nn.Module):
def __init__(self, feat_dim, proj_dim, gamma):
super(SelfAttention, self).__init__()
self.W_g = torch.nn.Conv2d(feat_dim, proj_dim, kernel_size=(1, 1))
self.W_f = torch.nn.Conv2d(feat_dim, proj_dim, kernel_size=(1, 1))
self.W_h = torch.nn.Conv2d(feat_dim, feat_dim, kernel_size=(1, 1))
self.softmax = torch.nn.Softmax(dim=1)
self.gamma = gamma
self.W_g.apply(weights_init('gaussian'))
self.W_f.apply(weights_init('gaussian'))
self.W_h.apply(weights_init('gaussian'))
def forward(self, x):
f = self.W_f.forward(x)
g = self.W_g.forward(x)
h = self.W_h.forward(x)
N, feat_D, hgt, wid = x.size(0), x.size(1), x.size(2), x.size(3)
proj_D = f.size(1)
f = f.view(N, proj_D, -1).transpose(1, 2)
g = g.view(N, proj_D, -1).transpose(1, 2)
h = h.view(N, feat_D, -1).transpose(1, 2)
o = []
for idx in range(N):
aff = torch.mm(g[idx], f[idx].transpose(0, 1))
aff = self.softmax(aff)
cur_o = torch.mm(aff, h[idx])
cur_o = cur_o.transpose(0, 1).contiguous()
cur_o = cur_o.view(1, feat_D, hgt, wid)
o.append(cur_o)
o = torch.cat(o, 0)
y = self.gamma * o + (1 - self.gamma) * x
return y
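# Minimal usage sketch (gamma blends the attention output against the input;
# shapes illustrative):
#   attn = SelfAttention(feat_dim=4, proj_dim=4, gamma=0.5)
#   y = attn(torch.rand(4, 4, 4, 4))  # same shape as the input
# The per-sample loop could equivalently be batched with torch.bmm on
# (N, HW, D) tensors.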
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'feat_dim': 4, 'proj_dim': 4, 'gamma': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_add_cat_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
y1 = yindex // 16
x2 = xindex
y0 = yindex % 16
tmp25 = tl.load(in_ptr4 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp0 = y1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x2 + 4 * y0), tmp4 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1, 1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x2 + 4 * y0), tmp9 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1, 1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x2 + 4 * y0), tmp14 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1, 1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + (x2 + 4 * y0), tmp16 & xmask & ymask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tmp23 = 4.0
tmp24 = tmp22 * tmp23
tmp26 = -3.0
tmp27 = tmp25 * tmp26
tmp28 = tmp24 + tmp27
tl.store(out_ptr0 + (y0 + 16 * x2 + 64 * y1), tmp28, xmask & ymask)
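# The kernel above concatenates the four per-sample attention outputs along
# the batch dimension and evaluates y = gamma * o + (1 - gamma) * x with
# gamma = 4 folded in as the constants 4.0 and -3.0 (since 1 - 4 = -3).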
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = extern_kernels.convolution(primals_3, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = buf0
del buf0
triton_poi_fused_convolution_0[grid(256)](buf4, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf5 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (1, 16), 0),
reinterpret_tensor(buf4, (4, 16), (16, 1), 0), out=buf5)
buf8 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
triton_per_fused__softmax_1[grid(16)](buf5, buf8, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf9 = buf2
del buf2
triton_poi_fused_convolution_0[grid(256)](buf9, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(buf9, (16, 4), (1, 16),
0), out=buf10)
buf11 = buf5
del buf5
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (1, 16), 64),
reinterpret_tensor(buf4, (4, 16), (16, 1), 64), out=buf11)
buf14 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
triton_per_fused__softmax_1[grid(16)](buf11, buf14, 16, 16, XBLOCK=
8, num_warps=2, num_stages=1)
buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(buf14, reinterpret_tensor(buf9, (16, 4), (1, 16),
64), out=buf15)
buf16 = buf11
del buf11
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (1, 16), 128),
reinterpret_tensor(buf4, (4, 16), (16, 1), 128), out=buf16)
buf19 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
triton_per_fused__softmax_1[grid(16)](buf16, buf19, 16, 16, XBLOCK=
8, num_warps=2, num_stages=1)
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(buf19, reinterpret_tensor(buf9, (16, 4), (1, 16),
128), out=buf20)
buf21 = buf16
del buf16
extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (1, 16), 192),
reinterpret_tensor(buf4, (4, 16), (16, 1), 192), out=buf21)
buf24 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
triton_per_fused__softmax_1[grid(16)](buf21, buf24, 16, 16, XBLOCK=
8, num_warps=2, num_stages=1)
buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(buf24, reinterpret_tensor(buf9, (16, 4), (1, 16),
192), out=buf25)
buf26 = reinterpret_tensor(buf21, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf21
triton_poi_fused_add_cat_mul_2[grid(64, 4)](buf10, buf15, buf20,
buf25, primals_3, buf26, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=
4, num_stages=1)
del buf10
del buf15
del buf20
del buf25
return (buf26, primals_1, primals_3, primals_4, primals_6,
reinterpret_tensor(buf4, (4, 16), (16, 1), 0), buf8,
reinterpret_tensor(buf4, (4, 16), (16, 1), 64), buf14,
reinterpret_tensor(buf4, (4, 16), (16, 1), 128), buf19,
reinterpret_tensor(buf4, (4, 16), (16, 1), 192), buf24,
reinterpret_tensor(buf9, (4, 16), (16, 1), 192), reinterpret_tensor
(buf3, (4, 16), (16, 1), 192), reinterpret_tensor(buf9, (4, 16), (
16, 1), 128), reinterpret_tensor(buf3, (4, 16), (16, 1), 128),
reinterpret_tensor(buf9, (4, 16), (16, 1), 64), reinterpret_tensor(
buf3, (4, 16), (16, 1), 64), reinterpret_tensor(buf9, (4, 16), (16,
1), 0), reinterpret_tensor(buf3, (4, 16), (16, 1), 0))
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
            if init_type == 'gaussian':
                init.normal_(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, 'Unsupported initialization: {}'.format(init_type)
        if hasattr(m, 'bias') and m.bias is not None:
            init.constant_(m.bias.data, 0.0)
return init_fun
class SelfAttentionNew(nn.Module):
def __init__(self, feat_dim, proj_dim, gamma):
super(SelfAttentionNew, self).__init__()
self.W_g = torch.nn.Conv2d(feat_dim, proj_dim, kernel_size=(1, 1))
self.W_f = torch.nn.Conv2d(feat_dim, proj_dim, kernel_size=(1, 1))
self.W_h = torch.nn.Conv2d(feat_dim, feat_dim, kernel_size=(1, 1))
self.softmax = torch.nn.Softmax(dim=1)
self.gamma = gamma
self.W_g.apply(weights_init('gaussian'))
self.W_f.apply(weights_init('gaussian'))
self.W_h.apply(weights_init('gaussian'))
def forward(self, input_0):
primals_1 = self.W_g.weight
primals_2 = self.W_g.bias
primals_4 = self.W_f.weight
primals_5 = self.W_f.bias
primals_6 = self.W_h.weight
primals_7 = self.W_h.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| fanyix/flownet2 | SelfAttention | false | 6,698 | [
"Apache-2.0"
] | 1 | 0643beef59eeaf4cf4907d0d51f486ffd713363f | https://github.com/fanyix/flownet2/tree/0643beef59eeaf4cf4907d0d51f486ffd713363f | import math
import torch
import torch.nn as nn
from torch.nn import init
def weights_init(init_type='gaussian'):
def init_fun(m):
classname = m.__class__.__name__
if (classname.find('Conv') == 0 or classname.find('Linear') == 0
) and hasattr(m, 'weight'):
            if init_type == 'gaussian':
                init.normal_(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, 'Unsupported initialization: {}'.format(init_type)
        if hasattr(m, 'bias') and m.bias is not None:
            init.constant_(m.bias.data, 0.0)
return init_fun
class Model(nn.Module):
def __init__(self, feat_dim, proj_dim, gamma):
super().__init__()
self.W_g = torch.nn.Conv2d(feat_dim, proj_dim, kernel_size=(1, 1))
self.W_f = torch.nn.Conv2d(feat_dim, proj_dim, kernel_size=(1, 1))
self.W_h = torch.nn.Conv2d(feat_dim, feat_dim, kernel_size=(1, 1))
self.softmax = torch.nn.Softmax(dim=1)
self.gamma = gamma
self.W_g.apply(weights_init('gaussian'))
self.W_f.apply(weights_init('gaussian'))
self.W_h.apply(weights_init('gaussian'))
def forward(self, x):
f = self.W_f.forward(x)
g = self.W_g.forward(x)
h = self.W_h.forward(x)
N, feat_D, hgt, wid = x.size(0), x.size(1), x.size(2), x.size(3)
proj_D = f.size(1)
f = f.view(N, proj_D, -1).transpose(1, 2)
g = g.view(N, proj_D, -1).transpose(1, 2)
h = h.view(N, feat_D, -1).transpose(1, 2)
o = []
for idx in range(N):
aff = torch.mm(g[idx], f[idx].transpose(0, 1))
aff = self.softmax(aff)
cur_o = torch.mm(aff, h[idx])
cur_o = cur_o.transpose(0, 1).contiguous()
cur_o = cur_o.view(1, feat_D, hgt, wid)
o.append(cur_o)
o = torch.cat(o, 0)
y = self.gamma * o + (1 - self.gamma) * x
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
dense_warp | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/df/cdf44hoqxi5iqenpbwcrawtackxxbo2leg7ntrbcx4c4yv73xeog.py
# Topologically Sorted Source Nodes: [iadd, mul_1, iadd_1, mul_2, iadd_2, mul_3, iadd_3], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
# iadd => mul
# iadd_1 => add_1
# iadd_2 => add_2
# iadd_3 => add_3
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_5, %arg0_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_33, %slice_37), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_41, %mul_1), kwargs = {})
# %slice_scatter_default : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%mul, %add_1, 3, 0, 3), kwargs = {})
# %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %slice_48, 3, 0, 3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_71, %slice_75), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_79, %mul_2), kwargs = {})
# %slice_scatter_default_2 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %add_2, 3, 0, 2), kwargs = {})
# %slice_scatter_default_3 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_2, %slice_86, 3, 0, 2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_109, %slice_113), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_117, %mul_3), kwargs = {})
# %slice_scatter_default_4 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_3, %add_3, 3, 0, 1), kwargs = {})
# %slice_scatter_default_5 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_4, %slice_124, 3, 0, 1), kwargs = {})
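# Note: the slice_scatter chain above is inductor's functionalization of the
# in-place slice updates (iadd .. iadd_3): products of shifted slices along
# the last dimension are accumulated into the progressively narrower column
# ranges [0, 3), [0, 2) and [0, 1), all folded into the single pointwise
# kernel below.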
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 26, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 64)
x4 = xindex % 16
x5 = xindex
x1 = (xindex // 4) % 4
x6 = (xindex // 4)
tmp61 = tl.load(in_ptr0 + (x4 + (64*x3)), xmask, eviction_policy='evict_last')
tmp62 = tl.load(in_ptr1 + (x5), xmask)
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp4 & tmp2
tmp6 = tmp4 & tmp5
tmp7 = tl.load(in_ptr0 + (x4 + (64*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tl.load(in_ptr1 + (x5), tmp6 & xmask, other=0.0)
tmp9 = tmp7 * tmp8
tmp10 = tl.load(in_ptr0 + (16 + x4 + (64*x3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr1 + (1 + x5), tmp6 & xmask, other=0.0)
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp6, tmp13, tmp14)
tmp16 = tl.load(in_ptr0 + (x4 + (64*x3)), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr1 + (x5), tmp5 & xmask, other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tl.where(tmp4, tmp15, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp5, tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (16 + x4 + (64*x3)), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tl.load(in_ptr1 + (1 + x5), tmp5 & xmask, other=0.0)
tmp24 = tmp22 * tmp23
tmp25 = tmp18 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp5, tmp25, tmp26)
tmp28 = tl.load(in_ptr0 + (x4 + (64*x3)), tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr1 + (x5), tmp2 & xmask, other=0.0)
tmp30 = tmp28 * tmp29
tmp31 = tl.where(tmp4, tmp27, tmp30)
tmp32 = tl.where(tmp4, tmp21, tmp31)
tmp33 = tl.load(in_ptr0 + (32 + x4 + (64*x3)), tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp34 = tl.load(in_ptr1 + (2 + x5), tmp2 & xmask, other=0.0)
tmp35 = tmp33 * tmp34
tmp36 = tmp32 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp2, tmp36, tmp37)
tmp39 = tmp4 & tmp4
tmp40 = tl.load(in_ptr0 + (x4 + (64*x3)), tmp39 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tl.load(in_ptr1 + (x5), tmp39 & xmask, other=0.0)
tmp42 = tmp40 * tmp41
tmp43 = tl.load(in_ptr0 + (16 + x4 + (64*x3)), tmp39 & xmask, eviction_policy='evict_last', other=0.0)
tmp44 = tl.load(in_ptr1 + (1 + x5), tmp39 & xmask, other=0.0)
tmp45 = tmp43 * tmp44
tmp46 = tmp42 + tmp45
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp39, tmp46, tmp47)
tmp49 = tl.load(in_ptr0 + (x4 + (64*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.load(in_ptr1 + (x5), tmp4 & xmask, other=0.0)
tmp51 = tmp49 * tmp50
tmp52 = tl.where(tmp4, tmp48, tmp51)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp4, tmp52, tmp53)
tmp55 = tl.load(in_ptr0 + (16 + x4 + (64*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp56 = tl.load(in_ptr1 + (1 + x5), tmp4 & xmask, other=0.0)
tmp57 = tmp55 * tmp56
tmp58 = tmp51 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp4, tmp58, tmp59)
tmp63 = tmp61 * tmp62
tmp64 = tl.where(tmp4, tmp60, tmp63)
tmp65 = tl.where(tmp4, tmp54, tmp64)
tmp66 = tl.where(tmp2, tmp38, tmp65)
tmp67 = tl.full([1], 1, tl.int64)
tmp68 = tmp0 < tmp67
tmp69 = tmp68 & tmp68
tmp70 = tmp2 & tmp69
tmp71 = tl.where(tmp2, tmp66, tmp66)
tmp72 = tl.load(in_ptr0 + (48 + (4*x1) + (64*x3)), tmp69 & xmask, eviction_policy='evict_last', other=0.0)
tmp73 = tl.load(in_ptr1 + (3 + (4*x6)), tmp69 & xmask, eviction_policy='evict_last', other=0.0)
tmp74 = tmp72 * tmp73
tmp75 = tmp71 + tmp74
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp69, tmp75, tmp76)
tmp78 = tmp2 & tmp68
tmp79 = tl.where(tmp68, tmp77, tmp71)
tmp80 = tl.full(tmp79.shape, 0.0, tmp79.dtype)
tmp81 = tl.where(tmp68, tmp79, tmp80)
tmp82 = tl.load(in_ptr0 + (48 + (4*x1) + (64*x3)), tmp68 & xmask, eviction_policy='evict_last', other=0.0)
tmp83 = tl.load(in_ptr1 + (3 + (4*x6)), tmp68 & xmask, eviction_policy='evict_last', other=0.0)
tmp84 = tmp82 * tmp83
tmp85 = tmp71 + tmp84
tmp86 = tl.full(tmp85.shape, 0.0, tmp85.dtype)
tmp87 = tl.where(tmp68, tmp85, tmp86)
tmp88 = tl.where(tmp68, tmp87, tmp71)
tmp89 = tl.where(tmp68, tmp81, tmp88)
tl.store(in_out_ptr0 + (x5), tmp89, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [iadd, mul_1, iadd_1, mul_2, iadd_2, mul_3, iadd_3], Original ATen: [aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(buf1, arg1_1, arg0_1, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class dense_warp(nn.Module):
def __init__(self):
super().__init__()
def forward(self, h1, cost):
g2 = torch.zeros_like(h1)
clone_h1 = h1.detach()
for d in range(cost.size()[-3]):
            g2[:, :, :, 0:cost.size()[-1] - d] += cost[:, d:d + 1, :, 0:cost.size()[-1] - d].mul(clone_h1[:, :, :, d:cost.size()[-1]])
return g2
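# In index form (a reading aid; W = cost.size(-1), D = cost.size(-3)): the
# loop above computes
#     g2[b, c, h, j] = sum over d < D of cost[b, d, h, j] * clone_h1[b, c, h, j + d]
# for j + d < W, i.e. a disparity-style correlation of h1 against the cost
# volume along the width axis.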
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
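# The kernel below fuses all four displacement steps of dense_warp's Python
# loop (d = 0..3 for the fixed 4x4x4x4 inputs) into a single elementwise pass
# over g2; in_ptr0 is the cost volume and in_ptr1 is h1. The comparisons of
# x0 (the width index) against 3, 2 and 1 reproduce the shrinking write
# regions 0:W-d of the sliced in-place additions.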
@triton.jit
def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 64
x4 = xindex % 16
x5 = xindex
x1 = xindex // 4 % 4
x6 = xindex // 4
    tmp61 = tl.load(in_ptr0 + (x4 + 64 * x3), xmask, eviction_policy='evict_last')
tmp62 = tl.load(in_ptr1 + x5, xmask)
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp4 & tmp2
tmp6 = tmp4 & tmp5
    tmp7 = tl.load(in_ptr0 + (x4 + 64 * x3), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tl.load(in_ptr1 + x5, tmp6 & xmask, other=0.0)
tmp9 = tmp7 * tmp8
tmp10 = tl.load(in_ptr0 + (16 + x4 + 64 * x3), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr1 + (1 + x5), tmp6 & xmask, other=0.0)
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp6, tmp13, tmp14)
    tmp16 = tl.load(in_ptr0 + (x4 + 64 * x3), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.load(in_ptr1 + x5, tmp5 & xmask, other=0.0)
tmp18 = tmp16 * tmp17
tmp19 = tl.where(tmp4, tmp15, tmp18)
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp5, tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (16 + x4 + 64 * x3), tmp5 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tl.load(in_ptr1 + (1 + x5), tmp5 & xmask, other=0.0)
tmp24 = tmp22 * tmp23
tmp25 = tmp18 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp5, tmp25, tmp26)
    tmp28 = tl.load(in_ptr0 + (x4 + 64 * x3), tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr1 + x5, tmp2 & xmask, other=0.0)
tmp30 = tmp28 * tmp29
tmp31 = tl.where(tmp4, tmp27, tmp30)
tmp32 = tl.where(tmp4, tmp21, tmp31)
tmp33 = tl.load(in_ptr0 + (32 + x4 + 64 * x3), tmp2 & xmask,
eviction_policy='evict_last', other=0.0)
tmp34 = tl.load(in_ptr1 + (2 + x5), tmp2 & xmask, other=0.0)
tmp35 = tmp33 * tmp34
tmp36 = tmp32 + tmp35
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp2, tmp36, tmp37)
tmp39 = tmp4 & tmp4
tmp40 = tl.load(in_ptr0 + (x4 + 64 * x3), tmp39 & xmask,
eviction_policy='evict_last', other=0.0)
tmp41 = tl.load(in_ptr1 + x5, tmp39 & xmask, other=0.0)
tmp42 = tmp40 * tmp41
tmp43 = tl.load(in_ptr0 + (16 + x4 + 64 * x3), tmp39 & xmask,
eviction_policy='evict_last', other=0.0)
tmp44 = tl.load(in_ptr1 + (1 + x5), tmp39 & xmask, other=0.0)
tmp45 = tmp43 * tmp44
tmp46 = tmp42 + tmp45
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp39, tmp46, tmp47)
    tmp49 = tl.load(in_ptr0 + (x4 + 64 * x3), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.load(in_ptr1 + x5, tmp4 & xmask, other=0.0)
tmp51 = tmp49 * tmp50
tmp52 = tl.where(tmp4, tmp48, tmp51)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp4, tmp52, tmp53)
tmp55 = tl.load(in_ptr0 + (16 + x4 + 64 * x3), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp56 = tl.load(in_ptr1 + (1 + x5), tmp4 & xmask, other=0.0)
tmp57 = tmp55 * tmp56
tmp58 = tmp51 + tmp57
tmp59 = tl.full(tmp58.shape, 0.0, tmp58.dtype)
tmp60 = tl.where(tmp4, tmp58, tmp59)
tmp63 = tmp61 * tmp62
tmp64 = tl.where(tmp4, tmp60, tmp63)
tmp65 = tl.where(tmp4, tmp54, tmp64)
tmp66 = tl.where(tmp2, tmp38, tmp65)
tmp67 = tl.full([1], 1, tl.int64)
tmp68 = tmp0 < tmp67
tmp69 = tmp68 & tmp68
tmp71 = tl.where(tmp2, tmp66, tmp66)
tmp72 = tl.load(in_ptr0 + (48 + 4 * x1 + 64 * x3), tmp69 & xmask,
eviction_policy='evict_last', other=0.0)
    tmp73 = tl.load(in_ptr1 + (3 + 4 * x6), tmp69 & xmask, eviction_policy='evict_last', other=0.0)
tmp74 = tmp72 * tmp73
tmp75 = tmp71 + tmp74
tmp76 = tl.full(tmp75.shape, 0.0, tmp75.dtype)
tmp77 = tl.where(tmp69, tmp75, tmp76)
tmp79 = tl.where(tmp68, tmp77, tmp71)
tmp80 = tl.full(tmp79.shape, 0.0, tmp79.dtype)
tmp81 = tl.where(tmp68, tmp79, tmp80)
tmp82 = tl.load(in_ptr0 + (48 + 4 * x1 + 64 * x3), tmp68 & xmask,
eviction_policy='evict_last', other=0.0)
    tmp83 = tl.load(in_ptr1 + (3 + 4 * x6), tmp68 & xmask, eviction_policy='evict_last', other=0.0)
tmp84 = tmp82 * tmp83
tmp85 = tmp71 + tmp84
tmp86 = tl.full(tmp85.shape, 0.0, tmp85.dtype)
tmp87 = tl.where(tmp68, tmp85, tmp86)
tmp88 = tl.where(tmp68, tmp87, tmp71)
tmp89 = tl.where(tmp68, tmp81, tmp88)
tl.store(in_out_ptr0 + x5, tmp89, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](buf1, arg1_1, arg0_1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class dense_warpNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| fqhank/HESIC | dense_warp | false | 6,699 | [
"Apache-2.0"
] | 1 | f15cb8e6822af45f0022ea4887fce915e250ed75 | https://github.com/fqhank/HESIC/tree/f15cb8e6822af45f0022ea4887fce915e250ed75 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, h1, cost):
g2 = torch.zeros_like(h1)
clone_h1 = h1.detach()
for d in range(cost.size()[-3]):
            g2[:, :, :, 0:cost.size()[-1] - d] += cost[:, d:d + 1, :, 0:cost.size()[-1] - d].mul(clone_h1[:, :, :, d:cost.size()[-1]])
return g2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
KernelMatcher | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ps/cps5r3mimvzmxfkludsnsxwqfrprrusee5gwryeh4w7edcd7walj.py
# Topologically Sorted Source Nodes: [k_embed, k_norm], Original ATen: [aten.mul, aten.linalg_vector_norm, aten.clamp_min]
# Source node to ATen node mapping:
# k_embed => mul
# k_norm => clamp_min, pow_1, pow_2, sum_1
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %unsqueeze), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-10), kwargs = {})
triton_poi_fused_clamp_min_linalg_vector_norm_mul_0 = async_compile.triton('triton_poi_fused_clamp_min_linalg_vector_norm_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_min_linalg_vector_norm_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_min_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp9
tmp11 = tmp7 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = 1e-10
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ny/cnyuymkbpiqhtjgqpwlinpbmbfxj6fw64hfk3le4jmznef66mptc.py
# Topologically Sorted Source Nodes: [k_embed, k_norm], Original ATen: [aten.mul, aten.div]
# Source node to ATen node mapping:
# k_embed => mul
# k_norm => div
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %unsqueeze), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %expand), kwargs = {})
triton_poi_fused_div_mul_1 = async_compile.triton('triton_poi_fused_div_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x4 = (xindex // 4)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + (x5), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fk/cfkpb4ktlmv2quxk6raxdfre2a2g2vszmw6fgblhx44wxl6p3vbc.py
# Topologically Sorted Source Nodes: [sub, pow_1, neg, pow_2, truediv, truediv_1, kernel_outputs, sum_1], Original ATen: [aten.sub, aten.pow, aten.neg, aten.div, aten.exp, aten.sum]
# Source node to ATen node mapping:
# kernel_outputs => exp
# neg => neg
# pow_1 => pow_5
# pow_2 => pow_6
# sub => sub
# sum_1 => sum_3
# truediv => div_2
# truediv_1 => div_3
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze_4, %arg4_1), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%pow_5,), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg5_1, 2), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, %pow_6), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, 2), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_3,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2]), kwargs = {})
triton_poi_fused_div_exp_neg_pow_sub_sum_2 = async_compile.triton('triton_poi_fused_div_exp_neg_pow_sub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_neg_pow_sub_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_exp_neg_pow_sub_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 21)
x0 = xindex % 21
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = -tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 / tmp8
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = tl_math.exp(tmp11)
tmp15 = tmp13 * tmp14
tmp16 = tmp15 - tmp3
tmp17 = tmp16 * tmp16
tmp18 = -tmp17
tmp19 = tmp18 / tmp8
tmp20 = tmp19 * tmp10
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp12 + tmp21
tmp25 = tmp23 * tmp24
tmp26 = tmp25 - tmp3
tmp27 = tmp26 * tmp26
tmp28 = -tmp27
tmp29 = tmp28 / tmp8
tmp30 = tmp29 * tmp10
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp22 + tmp31
tmp35 = tmp33 * tmp34
tmp36 = tmp35 - tmp3
tmp37 = tmp36 * tmp36
tmp38 = -tmp37
tmp39 = tmp38 / tmp8
tmp40 = tmp39 * tmp10
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp32 + tmp41
tl.store(out_ptr0 + (x2), tmp42, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dm/cdmmcpjmg6vkigutcpwgkxhqfgxp7syymhmshwvkinhujhvx7rra.py
# Topologically Sorted Source Nodes: [clamp, log, kernel_outputs_1, logits], Original ATen: [aten.clamp, aten.log, aten.mul, aten.sum]
# Source node to ATen node mapping:
# clamp => clamp_min_2
# kernel_outputs_1 => mul_3
# log => log
# logits => sum_4
# Graph fragment:
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_3, 1e-10), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log, 0.01), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [1]), kwargs = {})
triton_poi_fused_clamp_log_mul_sum_3 = async_compile.triton('triton_poi_fused_clamp_log_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_log_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_log_mul_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 84
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 21
x1 = (xindex // 21)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (84*x1)), xmask)
tmp6 = tl.load(in_ptr0 + (21 + x0 + (84*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (42 + x0 + (84*x1)), xmask)
tmp16 = tl.load(in_ptr0 + (63 + x0 + (84*x1)), xmask)
tmp1 = 1e-10
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tl_math.log(tmp2)
tmp4 = 0.01
tmp5 = tmp3 * tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tl_math.log(tmp7)
tmp9 = tmp8 * tmp4
tmp10 = tmp5 + tmp9
tmp12 = triton_helpers.maximum(tmp11, tmp1)
tmp13 = tl_math.log(tmp12)
tmp14 = tmp13 * tmp4
tmp15 = tmp10 + tmp14
tmp17 = triton_helpers.maximum(tmp16, tmp1)
tmp18 = tl_math.log(tmp17)
tmp19 = tmp18 * tmp4
tmp20 = tmp15 + tmp19
tl.store(out_ptr0 + (x2), tmp20, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4, 4), (4, 1))
assert_size_stride(arg4_1, (1, 1, 1, 21), (21, 21, 21, 1))
assert_size_stride(arg5_1, (1, 1, 1, 21), (21, 21, 21, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [k_embed, k_norm], Original ATen: [aten.mul, aten.linalg_vector_norm, aten.clamp_min]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_min_linalg_vector_norm_mul_0.run(arg1_1, arg0_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [v_embed, v_norm], Original ATen: [aten.mul, aten.linalg_vector_norm, aten.clamp_min]
triton_poi_fused_clamp_min_linalg_vector_norm_mul_0.run(arg3_1, arg2_1, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k_embed, k_norm], Original ATen: [aten.mul, aten.div]
triton_poi_fused_div_mul_1.run(arg1_1, arg0_1, buf0, buf2, 64, grid=grid(64), stream=stream0)
del arg1_1
del buf0
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_embed, v_norm], Original ATen: [aten.mul, aten.div]
triton_poi_fused_div_mul_1.run(arg3_1, arg2_1, buf1, buf3, 64, grid=grid(64), stream=stream0)
del arg3_1
del buf1
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k_embed, k_norm, bmm_1], Original ATen: [aten.mul, aten.div, aten.bmm]
extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), out=buf4)
del buf2
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [k_by_v_mask], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 0), reinterpret_tensor(arg2_1, (4, 1, 4), (4, 1, 1), 0), out=buf5)
del arg0_1
del arg2_1
buf6 = empty_strided_cuda((4, 4, 21), (84, 21, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, pow_1, neg, pow_2, truediv, truediv_1, kernel_outputs, sum_1], Original ATen: [aten.sub, aten.pow, aten.neg, aten.div, aten.exp, aten.sum]
triton_poi_fused_div_exp_neg_pow_sub_sum_2.run(buf4, buf5, arg4_1, arg5_1, buf6, 336, grid=grid(336), stream=stream0)
del arg4_1
del arg5_1
del buf4
del buf5
buf7 = empty_strided_cuda((4, 21), (21, 1), torch.float32)
# Topologically Sorted Source Nodes: [clamp, log, kernel_outputs_1, logits], Original ATen: [aten.clamp, aten.log, aten.mul, aten.sum]
triton_poi_fused_clamp_log_mul_sum_3.run(buf6, buf7, 84, grid=grid(84), stream=stream0)
del buf6
return (buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((1, 1, 1, 21), (21, 21, 21, 1), device='cuda:0', dtype=torch.float32)
arg5_1 = rand_strided((1, 1, 1, 21), (21, 21, 21, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
class KernelMatcher(nn.Module):
def __init__(self, embed_dim: 'int', kernel_num: 'int'=21) ->None:
super(KernelMatcher, self).__init__()
self._embed_dim = embed_dim
self._kernel_num = kernel_num
mus, sigmas = self.kernel_init(self._kernel_num)
self._mus = nn.Parameter(mus, requires_grad=False)
self._sigmas = nn.Parameter(sigmas, requires_grad=False)
    def kernel_init(self, kernel_num: 'int') ->Tuple[torch.Tensor, torch.Tensor]:
mus = [1]
bin_size = 2.0 / (kernel_num - 1)
mus.append(1 - bin_size / 2)
for i in range(1, kernel_num - 1):
mus.append(mus[i] - bin_size)
mus = torch.tensor(mus).view(1, 1, 1, kernel_num)
sigmas = [0.001]
sigmas += [0.1] * (kernel_num - 1)
sigmas = torch.tensor(sigmas).view(1, 1, 1, kernel_num)
return mus, sigmas
def forward(self, k_embed: 'torch.Tensor', k_mask: 'torch.Tensor',
v_embed: 'torch.Tensor', v_mask: 'torch.Tensor') ->torch.Tensor:
k_embed = k_embed * k_mask.unsqueeze(-1)
v_embed = v_embed * v_mask.unsqueeze(-1)
        k_by_v_mask = torch.bmm(k_mask.float().unsqueeze(-1), v_mask.float().unsqueeze(-1).transpose(1, 2))
k_norm = F.normalize(k_embed, p=2, dim=2, eps=1e-10)
v_norm = F.normalize(v_embed, p=2, dim=2, eps=1e-10)
        inter = (torch.bmm(k_norm, v_norm.transpose(1, 2)) * k_by_v_mask).unsqueeze(-1)
        kernel_outputs = torch.exp(-(inter - self._mus) ** 2 / self._sigmas ** 2 / 2)
        kernel_outputs = kernel_outputs.sum(dim=2).clamp(min=1e-10).log() * 0.01
logits = kernel_outputs.sum(dim=1)
return logits
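# In formula form (a reading aid): with s_ij the masked cosine similarity of
# key position i and value position j, the pooling above computes
#     logits_k = sum_i 0.01 * log(max(sum_j exp(-(s_ij - mu_k)**2 / (2 * sigma_k**2)), 1e-10))
# i.e. a K-NRM-style soft-TF kernel pooling over the 21 Gaussian kernels.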
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]),
torch.rand([4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from typing import Tuple
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
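# triton_poi_fused_clamp_min_linalg_vector_norm_0 computes the per-row L2
# norm of the mask-scaled embeddings, clamped at 1e-10 -- the denominator of
# F.normalize(..., p=2, dim=2); the four multiply-square-accumulate steps
# unroll the embedding dimension (size 4).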
@triton.jit
def triton_poi_fused_clamp_min_linalg_vector_norm_mul_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp9
tmp11 = tmp7 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = 1e-10
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tl.store(out_ptr0 + x2, tmp18, xmask)
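# triton_poi_fused_div_mul_1 divides the mask-scaled embeddings by the
# clamped norm above, completing F.normalize for both k_embed and v_embed.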
@triton.jit
def triton_poi_fused_div_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x4 = xindex // 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + x5, tmp4, xmask)
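# triton_poi_fused_div_exp_neg_pow_sub_sum_2 applies the 21 Gaussian kernels
# exp(-(s - mu)**2 / (2 * sigma**2)) to the masked similarities (in_ptr0 =
# cosine scores, in_ptr1 = mask product, in_ptr2 = mus, in_ptr3 = sigmas) and
# sums the responses over the four value positions.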
@triton.jit
def triton_poi_fused_div_exp_neg_pow_sub_sum_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 21
x0 = xindex % 21
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = -tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 / tmp8
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = tl_math.exp(tmp11)
tmp15 = tmp13 * tmp14
tmp16 = tmp15 - tmp3
tmp17 = tmp16 * tmp16
tmp18 = -tmp17
tmp19 = tmp18 / tmp8
tmp20 = tmp19 * tmp10
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp12 + tmp21
tmp25 = tmp23 * tmp24
tmp26 = tmp25 - tmp3
tmp27 = tmp26 * tmp26
tmp28 = -tmp27
tmp29 = tmp28 / tmp8
tmp30 = tmp29 * tmp10
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp22 + tmp31
tmp35 = tmp33 * tmp34
tmp36 = tmp35 - tmp3
tmp37 = tmp36 * tmp36
tmp38 = -tmp37
tmp39 = tmp38 / tmp8
tmp40 = tmp39 * tmp10
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp32 + tmp41
tl.store(out_ptr0 + x2, tmp42, xmask)
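# triton_poi_fused_clamp_log_mul_sum_3 clamps the pooled kernel responses at
# 1e-10, takes the log, scales by 0.01, and sums over the four key positions,
# yielding the (batch, 21) logits.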
@triton.jit
def triton_poi_fused_clamp_log_mul_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 84
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 21
x1 = xindex // 21
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 84 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (21 + x0 + 84 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (42 + x0 + 84 * x1), xmask)
tmp16 = tl.load(in_ptr0 + (63 + x0 + 84 * x1), xmask)
tmp1 = 1e-10
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tl_math.log(tmp2)
tmp4 = 0.01
tmp5 = tmp3 * tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tl_math.log(tmp7)
tmp9 = tmp8 * tmp4
tmp10 = tmp5 + tmp9
tmp12 = triton_helpers.maximum(tmp11, tmp1)
tmp13 = tl_math.log(tmp12)
tmp14 = tmp13 * tmp4
tmp15 = tmp10 + tmp14
tmp17 = triton_helpers.maximum(tmp16, tmp1)
tmp18 = tl_math.log(tmp17)
tmp19 = tmp18 * tmp4
tmp20 = tmp15 + tmp19
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4, 4), (4, 1))
assert_size_stride(arg4_1, (1, 1, 1, 21), (21, 21, 21, 1))
assert_size_stride(arg5_1, (1, 1, 1, 21), (21, 21, 21, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_min_linalg_vector_norm_mul_0[grid(16)](arg1_1,
arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_clamp_min_linalg_vector_norm_mul_0[grid(16)](arg3_1,
arg2_1, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_mul_1[grid(64)](arg1_1, arg0_1, buf0, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg1_1
del buf0
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_div_mul_1[grid(64)](arg3_1, arg2_1, buf1, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del arg3_1
del buf1
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (4, 4, 4), (16, 1,
4), 0), out=buf4)
del buf2
buf5 = buf3
del buf3
extern_kernels.bmm(reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1),
0), reinterpret_tensor(arg2_1, (4, 1, 4), (4, 1, 1), 0), out=buf5)
del arg0_1
del arg2_1
buf6 = empty_strided_cuda((4, 4, 21), (84, 21, 1), torch.float32)
triton_poi_fused_div_exp_neg_pow_sub_sum_2[grid(336)](buf4, buf5,
arg4_1, arg5_1, buf6, 336, XBLOCK=128, num_warps=4, num_stages=1)
del arg4_1
del arg5_1
del buf4
del buf5
buf7 = empty_strided_cuda((4, 21), (21, 1), torch.float32)
triton_poi_fused_clamp_log_mul_sum_3[grid(84)](buf6, buf7, 84,
XBLOCK=128, num_warps=4, num_stages=1)
del buf6
return buf7,
class KernelMatcherNew(nn.Module):
def __init__(self, embed_dim: 'int', kernel_num: 'int'=21) ->None:
super(KernelMatcherNew, self).__init__()
self._embed_dim = embed_dim
self._kernel_num = kernel_num
mus, sigmas = self.kernel_init(self._kernel_num)
self._mus = nn.Parameter(mus, requires_grad=False)
self._sigmas = nn.Parameter(sigmas, requires_grad=False)
    def kernel_init(self, kernel_num: 'int') ->Tuple[torch.Tensor, torch.Tensor]:
mus = [1]
bin_size = 2.0 / (kernel_num - 1)
mus.append(1 - bin_size / 2)
for i in range(1, kernel_num - 1):
mus.append(mus[i] - bin_size)
mus = torch.tensor(mus).view(1, 1, 1, kernel_num)
sigmas = [0.001]
sigmas += [0.1] * (kernel_num - 1)
sigmas = torch.tensor(sigmas).view(1, 1, 1, kernel_num)
return mus, sigmas
def forward(self, input_0, input_1, input_2, input_3):
arg4_1 = self._mus
arg5_1 = self._sigmas
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1])
return output[0]
| fengtaoo/opmft | KernelMatcher | false | 6,700 | [
"MIT"
] | 1 | 64f2a12c724295cd913eda02502f2e2a20f2dd55 | https://github.com/fengtaoo/opmft/tree/64f2a12c724295cd913eda02502f2e2a20f2dd55 | import torch
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, embed_dim: 'int', kernel_num: 'int'=21) ->None:
super().__init__()
self._embed_dim = embed_dim
self._kernel_num = kernel_num
mus, sigmas = self.kernel_init(self._kernel_num)
self._mus = nn.Parameter(mus, requires_grad=False)
self._sigmas = nn.Parameter(sigmas, requires_grad=False)
    def kernel_init(self, kernel_num: 'int') ->Tuple[torch.Tensor, torch.Tensor]:
mus = [1]
bin_size = 2.0 / (kernel_num - 1)
mus.append(1 - bin_size / 2)
for i in range(1, kernel_num - 1):
mus.append(mus[i] - bin_size)
mus = torch.tensor(mus).view(1, 1, 1, kernel_num)
sigmas = [0.001]
sigmas += [0.1] * (kernel_num - 1)
sigmas = torch.tensor(sigmas).view(1, 1, 1, kernel_num)
return mus, sigmas
def forward(self, k_embed: 'torch.Tensor', k_mask: 'torch.Tensor',
v_embed: 'torch.Tensor', v_mask: 'torch.Tensor') ->torch.Tensor:
k_embed = k_embed * k_mask.unsqueeze(-1)
v_embed = v_embed * v_mask.unsqueeze(-1)
        k_by_v_mask = torch.bmm(k_mask.float().unsqueeze(-1), v_mask.float().unsqueeze(-1).transpose(1, 2))
k_norm = F.normalize(k_embed, p=2, dim=2, eps=1e-10)
v_norm = F.normalize(v_embed, p=2, dim=2, eps=1e-10)
        inter = (torch.bmm(k_norm, v_norm.transpose(1, 2)) * k_by_v_mask).unsqueeze(-1)
        kernel_outputs = torch.exp(-(inter - self._mus) ** 2 / self._sigmas ** 2 / 2)
        kernel_outputs = kernel_outputs.sum(dim=2).clamp(min=1e-10).log() * 0.01
logits = kernel_outputs.sum(dim=1)
return logits
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]),
torch.rand([4, 4])]
def get_init_inputs():
return [4]
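# Worked example (a minimal sketch, not part of the original record): with
# kernel_num = 21, bin_size = 2.0 / 20 = 0.1, so the kernels form one
# exact-match bin at mu = 1.0 (sigma = 0.001) plus 20 soft bins at
# mu = 0.95, 0.85, ..., -0.95 (sigma = 0.1) covering similarities in [-1, 1].
if __name__ == '__main__':
    m = Model(4)
    mus = m._mus.flatten()
    assert abs(mus[0].item() - 1.0) < 1e-06
    assert abs(mus[1].item() - 0.95) < 1e-06
    logits = m(*get_inputs())
    print(logits.shape)  # torch.Size([4, 21])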
|
ResBlk | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wo/cwo5hzyj7r5kfs5qkbujhau55erj2h3367t3krgxxma4ysrszby7.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %primals_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vo/cvo56aotw4yuhuax6oyrf43t5ssqhzuwodjmjfylt42bqssid7vq.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/iz/ciznb73q5a3l5esh4sw5ejdw3d2b6uuntjcpgiok5w7sbiizunre.py
# Topologically Sorted Source Nodes: [x_3, x_4, truediv], Original ATen: [aten.convolution, aten.add, aten.div]
# Source node to ATen node mapping:
# truediv => div
# x_3 => convolution_1
# x_4 => add
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %convolution_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 1.4142135623730951), kwargs = {})
triton_poi_fused_add_convolution_div_2 = async_compile.triton('triton_poi_fused_add_convolution_div_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_div_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_out_ptr0 + (x3), xmask)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp5 = 0.7071067811865475
tmp6 = tmp4 * tmp5
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf1, primals_3, buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf1
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3, x_4, truediv], Original ATen: [aten.convolution, aten.add, aten.div]
triton_poi_fused_add_convolution_div_2.run(buf5, primals_1, primals_5, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_5
return (buf5, primals_2, primals_4, buf0, buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def normalize(x, eps=1e-06):
"""Apply min-max normalization."""
x = x.contiguous()
N, C, H, W = x.size()
x_ = x.view(N * C, -1)
max_val = torch.max(x_, dim=1, keepdim=True)[0]
min_val = torch.min(x_, dim=1, keepdim=True)[0]
x_ = (x_ - min_val) / (max_val - min_val + eps)
out = x_.view(N, C, H, W)
return out
class ResBlk(nn.Module):
    def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2), normalize=False, downsample=False):
super().__init__()
self.actv = actv
self.normalize = normalize
self.downsample = downsample
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out)
def _build_weights(self, dim_in, dim_out):
self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
if self.normalize:
self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
if self.learned_sc:
self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
def _shortcut(self, x):
if self.learned_sc:
x = self.conv1x1(x)
if self.downsample:
x = F.avg_pool2d(x, 2)
return x
def _residual(self, x):
if self.normalize:
x = self.norm1(x)
x = self.actv(x)
x = self.conv1(x)
if self.downsample:
x = F.avg_pool2d(x, 2)
if self.normalize:
x = self.norm2(x)
x = self.actv(x)
x = self.conv2(x)
return x
def forward(self, x):
x = self._shortcut(x) + self._residual(x)
return x / math.sqrt(2)
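# Why divide by sqrt(2)? If the shortcut and residual branches are roughly
# independent with unit variance, their sum has variance 2, and scaling by
# 1 / sqrt(2) restores unit variance. A quick illustrative check (the helper
# name `_sqrt2_scaling_demo` is hypothetical, not part of the module):
def _sqrt2_scaling_demo():
    a = torch.randn(100000)
    b = torch.randn(100000)
    s = (a + b) / math.sqrt(2)
    return float(s.std())  # close to 1.0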
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    # Elementwise LeakyReLU with negative slope 0.2: y = x if x > 0 else 0.2 * x.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel  # guard against out-of-bounds lanes
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 0.2
    tmp4 = tmp0 * tmp3
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Fused conv1 epilogue: add the per-channel bias, apply LeakyReLU(0.2),
    # and also store the boolean "x > 0" mask used by the backward pass.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4  # channel index for the bias
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_2(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    # Fused residual epilogue: out = (shortcut + (conv2_out + bias)) / sqrt(2),
    # written back in place over the conv2 output buffer.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4  # channel index for the bias
    tmp0 = tl.load(in_ptr0 + x3, xmask)  # shortcut (the block input)
    tmp1 = tl.load(in_out_ptr0 + x3, xmask)  # conv2 output
    tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')  # bias
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp5 = 0.7071067811865475  # 1 / sqrt(2)
    tmp6 = tmp4 * tmp5
    tl.store(in_out_ptr0 + x3, tmp6, xmask)
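# A minimal eager-mode reference for the fused kernel above (illustrative
# only; `_add_bias_div_reference` is a hypothetical name, not part of the
# generated module). It reproduces (shortcut + conv_out + bias) / sqrt(2)
# and can be compared against the Triton result on small tensors.
def _add_bias_div_reference(shortcut, conv_out, bias):
    import math
    # bias is per-channel, so broadcast it over (N, C, H, W)
    return (shortcut + conv_out + bias.view(1, -1, 1, 1)) / math.sqrt(2)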
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](primals_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1,
primals_3, buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_div_2[grid(256)](buf5, primals_1,
primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_5
return buf5, primals_2, primals_4, buf0, buf2, buf3
def normalize(x, eps=1e-06):
"""Apply min-max normalization."""
x = x.contiguous()
N, C, H, W = x.size()
x_ = x.view(N * C, -1)
max_val = torch.max(x_, dim=1, keepdim=True)[0]
min_val = torch.min(x_, dim=1, keepdim=True)[0]
x_ = (x_ - min_val) / (max_val - min_val + eps)
out = x_.view(N, C, H, W)
return out
class ResBlkNew(nn.Module):
def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2), normalize=
False, downsample=False):
super().__init__()
self.actv = actv
self.normalize = normalize
self.downsample = downsample
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out)
def _build_weights(self, dim_in, dim_out):
self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
if self.normalize:
self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
if self.learned_sc:
self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
def _shortcut(self, x):
if self.learned_sc:
x = self.conv1x1(x)
if self.downsample:
x = F.avg_pool2d(x, 2)
return x
def _residual(self, x):
if self.normalize:
x = self.norm1(x)
x = self.actv(x)
x = self.conv1(x)
if self.downsample:
x = F.avg_pool2d(x, 2)
if self.normalize:
x = self.norm2(x)
x = self.actv(x)
x = self.conv2(x)
return x
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| fpaupier/stargan-v2 | ResBlk | false | 6,701 | [
"MIT"
] | 1 | 18d2e04ed6e6df963b84345e798d94383757aaa2 | https://github.com/fpaupier/stargan-v2/tree/18d2e04ed6e6df963b84345e798d94383757aaa2 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def normalize(x, eps=1e-06):
"""Apply min-max normalization."""
x = x.contiguous()
N, C, H, W = x.size()
x_ = x.view(N * C, -1)
max_val = torch.max(x_, dim=1, keepdim=True)[0]
min_val = torch.min(x_, dim=1, keepdim=True)[0]
x_ = (x_ - min_val) / (max_val - min_val + eps)
out = x_.view(N, C, H, W)
return out
class Model(nn.Module):
def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2), normalize=
False, downsample=False):
super().__init__()
self.actv = actv
self.normalize = normalize
self.downsample = downsample
self.learned_sc = dim_in != dim_out
self._build_weights(dim_in, dim_out)
def _build_weights(self, dim_in, dim_out):
self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
if self.normalize:
self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
if self.learned_sc:
self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
def _shortcut(self, x):
if self.learned_sc:
x = self.conv1x1(x)
if self.downsample:
x = F.avg_pool2d(x, 2)
return x
def _residual(self, x):
if self.normalize:
x = self.norm1(x)
x = self.actv(x)
x = self.conv1(x)
if self.downsample:
x = F.avg_pool2d(x, 2)
if self.normalize:
x = self.norm2(x)
x = self.actv(x)
x = self.conv2(x)
return x
def forward(self, x):
x = self._shortcut(x) + self._residual(x)
return x / math.sqrt(2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Enhancement_Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/gw/cgwips3show4vjbttvtj7uocwtsin2z3hk2swl6ehmuz3quabsat.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# out => convolution
# out_1 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/q5/cq5c6b2vnj2yohdbs2yfrzumcstrubm63clhsgiqfksf3dxqgcxs.py
# Topologically Sorted Source Nodes: [out_2, out_3, out_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.add, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# out_2 => convolution_1
# out_3 => gt_1, mul_1, where_1
# out_4 => add
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.01), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, %primals_1), kwargs = {})
# %gt_10 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_1, 0), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x3), None)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp9, None)
tl.store(out_ptr1 + (x3), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vn/cvnmllhhvuga55zco3pxqydujnre2gngaiyxshyh5yffqvzr336w.py
# Topologically Sorted Source Nodes: [out_12, out_13, out_14, out_15], Original ATen: [aten.convolution, aten.leaky_relu, aten.add, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# out_12 => convolution_5
# out_13 => gt_5, mul_5, where_5
# out_14 => add_2
# out_15 => add_3
# Graph fragment:
# %convolution_5 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_4, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_5, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_5, 0.01), kwargs = {})
# %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %convolution_5, %mul_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_5, %add_1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %primals_1), kwargs = {})
# %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_5, 0), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x3), None)
tmp10 = tl.load(in_ptr3 + (x3), None)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp11, None)
tl.store(out_ptr1 + (x3), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 32, 64, 64), (131072, 4096, 64, 1))
assert_size_stride(primals_2, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (32, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf1, primals_3, 524288, grid=grid(524288), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
buf14 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_2, out_3, out_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.add, aten.leaky_relu_backward]
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1.run(buf2, primals_5, primals_1, buf3, buf14, 524288, grid=grid(524288), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf5, primals_7, 524288, grid=grid(524288), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf7 = buf2; del buf2 # reuse
buf13 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_7, out_8, out_9], Original ATen: [aten.convolution, aten.leaky_relu, aten.add, aten.leaky_relu_backward]
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1.run(buf6, primals_9, buf3, buf7, buf13, 524288, grid=grid(524288), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [out_10, out_11], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf9, primals_11, 524288, grid=grid(524288), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf11 = buf6; del buf6 # reuse
buf12 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_12, out_13, out_14, out_15], Original ATen: [aten.convolution, aten.leaky_relu, aten.add, aten.leaky_relu_backward]
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2.run(buf10, primals_13, buf7, primals_1, buf11, buf12, 524288, grid=grid(524288), stream=stream0)
del buf10
del primals_13
return (buf11, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, buf1, buf3, buf5, buf7, buf9, buf12, buf13, buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 32, 64, 64), (131072, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def conv3x3(in_ch, out_ch, stride=1):
"""3x3 convolution with padding."""
return nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1)
class ResidualBlock(nn.Module):
"""Simple residual block with two 3x3 convolutions.
Args:
in_ch (int): number of input channels
out_ch (int): number of output channels
"""
def __init__(self, in_ch, out_ch):
super().__init__()
self.conv1 = conv3x3(in_ch, out_ch)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.conv2 = conv3x3(out_ch, out_ch)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.leaky_relu(out)
out = self.conv2(out)
out = self.leaky_relu(out)
out = out + identity
return out
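# Illustrative shape check for ResidualBlock (the helper name is
# hypothetical, not part of the module): with kernel_size=3, stride=1 and
# padding=1 the spatial size is preserved, so the residual addition is
# well-defined whenever in_ch == out_ch.
def _residual_block_demo():
    block = ResidualBlock(32, 32)
    x = torch.rand(1, 32, 16, 16)
    y = block(x)
    assert y.shape == x.shape
    return y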
class Enhancement_Block(nn.Module):
def __init__(self):
super().__init__()
self.RB1 = ResidualBlock(32, 32)
self.RB2 = ResidualBlock(32, 32)
self.RB3 = ResidualBlock(32, 32)
def forward(self, x):
identity = x
out = self.RB1(x)
out = self.RB2(out)
out = self.RB3(out)
out = out + identity
return out
def get_inputs():
return [torch.rand([4, 32, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # In-place fused epilogue: add the per-channel conv bias, then apply
    # LeakyReLU with the default negative slope 0.01.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # all lanes valid: xnumel divides evenly by XBLOCK
    x3 = xindex
    x1 = xindex // 4096 % 32  # channel index for the bias
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(in_out_ptr0 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1(in_ptr0,
    in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # Fused epilogue: LeakyReLU(conv_out + bias, 0.01) + identity, plus the
    # boolean "> 0" mask saved for the LeakyReLU backward pass.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 32  # channel index for the bias
    tmp0 = tl.load(in_ptr0 + x3, None)  # conv output
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')  # bias
    tmp8 = tl.load(in_ptr2 + x3, None)  # residual identity
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tmp9 = tmp7 + tmp8
    tmp10 = tmp7 > tmp3
    tl.store(out_ptr0 + x3, tmp9, None)
    tl.store(out_ptr1 + x3, tmp10, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2(in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
    ):
    # Same as kernel 1 with one extra residual add: the inner block result
    # plus the outer Enhancement_Block identity.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 32  # channel index for the bias
    tmp0 = tl.load(in_ptr0 + x3, None)  # conv output
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')  # bias
    tmp8 = tl.load(in_ptr2 + x3, None)  # inner residual
    tmp10 = tl.load(in_ptr3 + x3, None)  # outer identity
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tmp9 = tmp7 + tmp8
    tmp11 = tmp9 + tmp10
    tmp12 = tmp7 > tmp3
    tl.store(out_ptr0 + x3, tmp11, None)
    tl.store(out_ptr1 + x3, tmp12, None)
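# A minimal eager-mode reference for the fused kernel above (illustrative
# only; the helper name is hypothetical). It mirrors the op sequence
# LeakyReLU(conv_out + bias, 0.01) + inner_residual + outer_identity and
# also returns the "> 0" mask that the kernel saves for the backward pass.
def _fused_kernel2_reference(conv_out, bias, inner, identity):
    act = nn.functional.leaky_relu(conv_out + bias.view(1, -1, 1, 1), 0.01)
    return act + inner + identity, act > 0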
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 32, 64, 64), (131072, 4096, 64, 1))
assert_size_stride(primals_2, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_13, (32,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf1,
primals_3, 524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
buf14 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1[grid
(524288)](buf2, primals_5, primals_1, buf3, buf14, 524288,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf5,
primals_7, 524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf7 = buf2
del buf2
buf13 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_1[grid
(524288)](buf6, primals_9, buf3, buf7, buf13, 524288, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_9
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf9,
primals_11, 524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf11 = buf6
del buf6
buf12 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_2[grid
(524288)](buf10, primals_13, buf7, primals_1, buf11, buf12,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del buf10
del primals_13
return (buf11, primals_1, primals_2, primals_4, primals_6, primals_8,
primals_10, primals_12, buf1, buf3, buf5, buf7, buf9, buf12, buf13,
buf14)
def conv3x3(in_ch, out_ch, stride=1):
"""3x3 convolution with padding."""
return nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1)
class ResidualBlock(nn.Module):
"""Simple residual block with two 3x3 convolutions.
Args:
in_ch (int): number of input channels
out_ch (int): number of output channels
"""
def __init__(self, in_ch, out_ch):
super().__init__()
self.conv1 = conv3x3(in_ch, out_ch)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.conv2 = conv3x3(out_ch, out_ch)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.leaky_relu(out)
out = self.conv2(out)
out = self.leaky_relu(out)
out = out + identity
return out
class Enhancement_BlockNew(nn.Module):
def __init__(self):
super().__init__()
self.RB1 = ResidualBlock(32, 32)
self.RB2 = ResidualBlock(32, 32)
self.RB3 = ResidualBlock(32, 32)
def forward(self, input_0):
primals_2 = self.RB1.conv1.weight
primals_3 = self.RB1.conv1.bias
primals_4 = self.RB1.conv2.weight
primals_5 = self.RB1.conv2.bias
primals_6 = self.RB2.conv1.weight
primals_7 = self.RB2.conv1.bias
primals_8 = self.RB2.conv2.weight
primals_9 = self.RB2.conv2.bias
primals_10 = self.RB3.conv1.weight
primals_11 = self.RB3.conv1.bias
primals_12 = self.RB3.conv2.weight
primals_13 = self.RB3.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| fqhank/HESIC | Enhancement_Block | false | 6,702 | [
"Apache-2.0"
] | 1 | f15cb8e6822af45f0022ea4887fce915e250ed75 | https://github.com/fqhank/HESIC/tree/f15cb8e6822af45f0022ea4887fce915e250ed75 | import torch
import torch.nn as nn
def conv3x3(in_ch, out_ch, stride=1):
"""3x3 convolution with padding."""
return nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1)
class ResidualBlock(nn.Module):
"""Simple residual block with two 3x3 convolutions.
Args:
in_ch (int): number of input channels
out_ch (int): number of output channels
"""
def __init__(self, in_ch, out_ch):
super().__init__()
self.conv1 = conv3x3(in_ch, out_ch)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.conv2 = conv3x3(out_ch, out_ch)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.leaky_relu(out)
out = self.conv2(out)
out = self.leaky_relu(out)
out = out + identity
return out
class Model(nn.Module):
def __init__(self):
super().__init__()
self.RB1 = ResidualBlock(32, 32)
self.RB2 = ResidualBlock(32, 32)
self.RB3 = ResidualBlock(32, 32)
def forward(self, x):
identity = x
out = self.RB1(x)
out = self.RB2(out)
out = self.RB3(out)
out = out + identity
return out
def get_inputs():
return [torch.rand([4, 32, 64, 64])]
def get_init_inputs():
return []
|
PolicyNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/m7/cm7pdjo4nnbtzaxtijy33oer2f7pkavzdjxqid2yf6vnctigzhj4.py
# Topologically Sorted Source Nodes: [exp, mul, add, truediv, sub, x_3, x_4], Original ATen: [aten.exp, aten.mul, aten.add, aten.reciprocal, aten.sub]
# Source node to ATen node mapping:
# add => add
# exp => exp
# mul => mul
# sub => sub
# truediv => mul_1, reciprocal
# x_3 => mul_2
# x_4 => mul_3
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%view_5,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {})
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, 1.5), kwargs = {})
triton_poi_fused_add_exp_mul_reciprocal_sub_1 = async_compile.triton('triton_poi_fused_add_exp_mul_reciprocal_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_reciprocal_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_reciprocal_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3 + tmp2
tmp5 = tl.full([1], 1, tl.int32)
tmp6 = tmp5 / tmp4
tmp7 = 2.0
tmp8 = tmp6 * tmp7
tmp9 = tmp8 - tmp2
tmp10 = -1.0
tmp11 = tmp9 * tmp10
tmp12 = 1.5
tmp13 = tmp11 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp, mul, add, truediv, sub, x_3, x_4], Original ATen: [aten.exp, aten.mul, aten.add, aten.reciprocal, aten.sub]
triton_poi_fused_add_exp_mul_reciprocal_sub_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf4, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class PolicyNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size, init_w=3e-05):
super(PolicyNetwork, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, num_actions)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
"""
        Compute the policy output for a given state.
        Args:
            state (torch.Tensor): State of the dynamical system
"""
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
x = self.sigmoid_mod(self.linear3(x))
return x
def get_action(self, state):
state = torch.FloatTensor(state).unsqueeze(0)
action = self.forward(state)
action_np = action.detach().cpu().numpy()[0]
return action_np
def tanh_mod(self, x, p=1):
x = x.float()
x = 2 / (1 + torch.exp(-2 * (x / 100))) - 1
x = x * p
return x
def sigmoid_mod(self, x, p=1.5):
x = x.float()
x = (2 / (1 + torch.exp(x) * 1) - 1) * -1
x = x * p
return x
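# Worked identity for sigmoid_mod (illustrative check; the helper name is
# hypothetical): (2 / (1 + exp(x)) - 1) * -1 == (exp(x) - 1) / (exp(x) + 1)
# == tanh(x / 2), so sigmoid_mod(x, p) is simply p * tanh(x / 2).
def _sigmoid_mod_identity_demo():
    net = PolicyNetwork(4, 4, 4)
    x = torch.linspace(-5, 5, steps=11)
    return torch.allclose(net.sigmoid_mod(x), 1.5 * torch.tanh(x / 2), atol=1e-06)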
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_actions': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # In-place fused epilogue for the linear layers: add the bias, apply
    # ReLU, and store the "<= 0" mask used by the ReLU backward pass.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4  # feature index for the bias
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_exp_mul_reciprocal_sub_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    # Elementwise sigmoid_mod with p = 1.5:
    # out = 1.5 * -(2 / (1 + exp(x)) - 1), i.e. 1.5 * tanh(x / 2).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.exp(tmp0)
    tmp2 = 1.0
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3 + tmp2
    tmp5 = tl.full([1], 1, tl.int32)
    tmp6 = tmp5 / tmp4
    tmp7 = 2.0
    tmp8 = tmp6 * tmp7
    tmp9 = tmp8 - tmp2
    tmp10 = -1.0
    tmp11 = tmp9 * tmp10
    tmp12 = 1.5
    tmp13 = tmp11 * tmp12
    tl.store(out_ptr0 + x0, tmp13, xmask)
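# A minimal eager-mode reference for the fused kernel above (illustrative
# only; the helper name is hypothetical). The elementwise chain is exactly
# sigmoid_mod with p = 1.5: out = 1.5 * -(2 / (1 + exp(x)) - 1).
def _fused_sigmoid_mod_reference(x):
    return (2.0 / (1.0 + torch.exp(x)) - 1.0) * -1.0 * 1.5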
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_exp_mul_reciprocal_sub_1[grid(256)](buf4, buf5,
256, XBLOCK=256, num_warps=4, num_stages=1)
    return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
        buf4, primals_6, buf6, primals_4, buf7)
class PolicyNetworkNew(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size, init_w=3e-05):
super(PolicyNetworkNew, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, num_actions)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def get_action(self, state):
state = torch.FloatTensor(state).unsqueeze(0)
action = self.forward(state)
action_np = action.detach().cpu().numpy()[0]
return action_np
def tanh_mod(self, x, p=1):
x = x.float()
x = 2 / (1 + torch.exp(-2 * (x / 100))) - 1
x = x * p
return x
def sigmoid_mod(self, x, p=1.5):
x = x.float()
x = (2 / (1 + torch.exp(x) * 1) - 1) * -1
x = x * p
return x
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| frknayk/Reinforcement-Learning-In-Control | PolicyNetwork | false | 6,703 | [
"MIT"
] | 1 | 24c7eb6fa6b6390ee2dd04f25036c37896ecd944 | https://github.com/frknayk/Reinforcement-Learning-In-Control/tree/24c7eb6fa6b6390ee2dd04f25036c37896ecd944 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_size, init_w=3e-05):
super().__init__()
self.linear1 = nn.Linear(num_inputs, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, num_actions)
self.linear3.weight.data.uniform_(-init_w, init_w)
self.linear3.bias.data.uniform_(-init_w, init_w)
def forward(self, state):
"""
        Compute the policy output for a given state.
        Args:
            state (torch.Tensor): State of the dynamical system
"""
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
x = self.sigmoid_mod(self.linear3(x))
return x
def get_action(self, state):
state = torch.FloatTensor(state).unsqueeze(0)
action = self.forward(state)
action_np = action.detach().cpu().numpy()[0]
return action_np
def tanh_mod(self, x, p=1):
x = x.float()
x = 2 / (1 + torch.exp(-2 * (x / 100))) - 1
x = x * p
return x
def sigmoid_mod(self, x, p=1.5):
x = x.float()
x = (2 / (1 + torch.exp(x) * 1) - 1) * -1
x = x * p
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
enhance_net_nopool | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sj/csj6uus7z5hpvi77pvgp63jx4bne5i65mpzpsuvveo3mzfov6ycm.py
# Topologically Sorted Source Nodes: [conv2d, x1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/t6/ct6prd5ums7vtosvwfpighaetfimfezndu4oevt5kcynqsnxc2wi.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_2, %relu_3], 1), kwargs = {})
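# Fused concatenation: channels 0-31 are copied from the already-activated first input,
# while channels 32-63 are computed on the fly as relu(conv_out + bias) from the second
# conv output, so no separate bias/ReLU kernel is launched for that branch.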
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 64
x0 = xindex % 4096
x2 = (xindex // 262144)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 64, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-32) + x1)) + (131072*x2)), tmp6, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-32) + x1), tmp6, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp6, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x3), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yp/cyphcwvvchm4no2jwafz2sl7yqmy3phu4bgtem7ff4sjc3tplvma.py
# Topologically Sorted Source Nodes: [conv2d_6, x_r], Original ATen: [aten.convolution, aten.tanh]
# Source node to ATen node mapping:
# conv2d_6 => convolution_6
# x_r => tanh
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_6,), kwargs = {})
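# Fused epilogue for the final conv: per-channel bias add followed by tanh, in place.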
triton_poi_fused_convolution_tanh_2 = async_compile.triton('triton_poi_fused_convolution_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 393216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 24
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sx/csxqlkvlklm3rcvi6gcbvrem5mnw6ihywc24rzf5fd25cgiwlhwo.py
# Topologically Sorted Source Nodes: [pow_1, sub, mul, x, pow_2, sub_1, mul_1, x_1, pow_3, sub_2, mul_2, x_2, pow_4, sub_3, mul_3, enhance_image_1, pow_5, sub_4, mul_4, x_3, pow_6, sub_5, mul_5, x_4, pow_7, sub_6, mul_6, x_5, pow_8, sub_7, mul_7, enhance_image], Original ATen: [aten.pow, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# enhance_image => add_7
# enhance_image_1 => add_3
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# pow_5 => pow_5
# pow_6 => pow_6
# pow_7 => pow_7
# pow_8 => pow_8
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# sub_4 => sub_4
# sub_5 => sub_5
# sub_6 => sub_6
# sub_7 => sub_7
# x => add
# x_1 => add_1
# x_2 => add_2
# x_3 => add_4
# x_4 => add_5
# x_5 => add_6
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_1, %primals_3), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %sub), kwargs = {})
# %add : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %mul), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, %add), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, %sub_1), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_1), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_3, %add_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_2, %sub_2), kwargs = {})
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_2), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_2, 2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_4, %add_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_3, %sub_3), kwargs = {})
# %add_3 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_3), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_3, 2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_5, %add_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_4, %sub_4), kwargs = {})
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %mul_4), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_4, 2), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_6, %add_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_5, %sub_5), kwargs = {})
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_5), kwargs = {})
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_5, 2), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_7, %add_5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_6, %sub_6), kwargs = {})
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %mul_6), kwargs = {})
# %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_6, 2), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_8, %add_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_7, %sub_7), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_7), kwargs = {})
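# Single-pass evaluation of eight Zero-DCE curve steps, x <- x + r_k * (x^2 - x), with all
# eight 3-channel r-maps read from the 24-channel tanh output at 12288-element offsets.
# The images after steps 1, 4 (enhance_image_1), and 8 (enhance_image) are written out.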
triton_poi_fused_add_mul_pow_sub_3 = async_compile.triton('triton_poi_fused_add_mul_pow_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_sub_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 12288
x1 = (xindex // 12288)
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0 + (98304*x1)), None)
tmp6 = tl.load(in_ptr1 + (12288 + x0 + (98304*x1)), None)
tmp11 = tl.load(in_ptr1 + (24576 + x0 + (98304*x1)), None)
tmp16 = tl.load(in_ptr1 + (36864 + x0 + (98304*x1)), None)
tmp21 = tl.load(in_ptr1 + (49152 + x0 + (98304*x1)), None)
tmp26 = tl.load(in_ptr1 + (61440 + x0 + (98304*x1)), None)
tmp31 = tl.load(in_ptr1 + (73728 + x0 + (98304*x1)), None)
tmp36 = tl.load(in_ptr1 + (86016 + x0 + (98304*x1)), None)
tmp2 = tmp0 * tmp0
tmp3 = tmp2 - tmp0
tmp4 = tmp1 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 * tmp5
tmp8 = tmp7 - tmp5
tmp9 = tmp6 * tmp8
tmp10 = tmp5 + tmp9
tmp12 = tmp10 * tmp10
tmp13 = tmp12 - tmp10
tmp14 = tmp11 * tmp13
tmp15 = tmp10 + tmp14
tmp17 = tmp15 * tmp15
tmp18 = tmp17 - tmp15
tmp19 = tmp16 * tmp18
tmp20 = tmp15 + tmp19
tmp22 = tmp20 * tmp20
tmp23 = tmp22 - tmp20
tmp24 = tmp21 * tmp23
tmp25 = tmp20 + tmp24
tmp27 = tmp25 * tmp25
tmp28 = tmp27 - tmp25
tmp29 = tmp26 * tmp28
tmp30 = tmp25 + tmp29
tmp32 = tmp30 * tmp30
tmp33 = tmp32 - tmp30
tmp34 = tmp31 * tmp33
tmp35 = tmp30 + tmp34
tmp37 = tmp35 * tmp35
tmp38 = tmp37 - tmp35
tmp39 = tmp36 * tmp38
tmp40 = tmp35 + tmp39
tl.store(out_ptr0 + (x2), tmp5, None)
tl.store(out_ptr1 + (x2), tmp20, None)
tl.store(out_ptr2 + (x2), tmp40, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/f5/cf56smsnozdiyvduisumbn2faxpuykoxh3gcmofog4sqc7fsjpmw.py
# Topologically Sorted Source Nodes: [r], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# r => cat_3
# Graph fragment:
# %cat_3 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_1, %getitem_2, %getitem_3, %getitem_4, %getitem_5, %getitem_6, %getitem_7], 1), kwargs = {})
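# torch.split yields contiguous views, so concatenating r1..r8 back together reduces to an
# identity copy: in every branch the offset arithmetic cancels to x0 + 4096*x1 + 98304*x2.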
triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 393216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 24
x0 = xindex % 4096
x2 = (xindex // 98304)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (98304*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 6, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (12288 + x0 + (4096*((-3) + x1)) + (98304*x2)), tmp9, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 9, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (24576 + x0 + (4096*((-6) + x1)) + (98304*x2)), tmp14, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 12, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (36864 + x0 + (4096*((-9) + x1)) + (98304*x2)), tmp19, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 15, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (49152 + x0 + (4096*((-12) + x1)) + (98304*x2)), tmp24, other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 18, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr0 + (61440 + x0 + (4096*((-15) + x1)) + (98304*x2)), tmp29, other=0.0)
tmp31 = tmp0 >= tmp27
tmp32 = tl.full([1], 21, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr0 + (73728 + x0 + (4096*((-18) + x1)) + (98304*x2)), tmp34, other=0.0)
tmp36 = tmp0 >= tmp32
tmp37 = tl.full([1], 24, tl.int64)
tmp38 = tmp0 < tmp37
tmp39 = tl.load(in_ptr0 + (86016 + x0 + (4096*((-21) + x1)) + (98304*x2)), tmp36, other=0.0)
tmp40 = tl.where(tmp34, tmp35, tmp39)
tmp41 = tl.where(tmp29, tmp30, tmp40)
tmp42 = tl.where(tmp24, tmp25, tmp41)
tmp43 = tl.where(tmp19, tmp20, tmp42)
tmp44 = tl.where(tmp14, tmp15, tmp43)
tmp45 = tl.where(tmp9, tmp10, tmp44)
tmp46 = tl.where(tmp4, tmp5, tmp45)
tl.store(out_ptr0 + (x3), tmp46, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sv/csvoew5qi7m2gpjv4jicykul4c5j2ofkaxvtrolqipjc4qrnj3rj.py
# Topologically Sorted Source Nodes: [conv2d_5, x6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_5 => convolution_5
# x6 => relu_5
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {})
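# Recomputes relu(conv_out + bias) and stores the boolean (output <= 0) mask that
# autograd's threshold_backward uses to zero gradients in the ReLU backward pass.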
triton_poi_fused_convolution_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (32, ), (1, ))
assert_size_stride(primals_12, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (32, ), (1, ))
assert_size_stride(primals_14, (24, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (24, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 524288, grid=grid(524288), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf5, primals_7, 524288, grid=grid(524288), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf5, buf6, primals_9, buf7, 1048576, grid=grid(1048576), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf9 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf3, buf8, primals_11, buf9, 1048576, grid=grid(1048576), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf11 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf1, buf10, primals_13, buf11, 1048576, grid=grid(1048576), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 24, 64, 64), (98304, 4096, 64, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv2d_6, x_r], Original ATen: [aten.convolution, aten.tanh]
triton_poi_fused_convolution_tanh_2.run(buf13, primals_15, 393216, grid=grid(393216), stream=stream0)
del primals_15
buf14 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32)
buf15 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32)
buf16 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, sub, mul, x, pow_2, sub_1, mul_1, x_1, pow_3, sub_2, mul_2, x_2, pow_4, sub_3, mul_3, enhance_image_1, pow_5, sub_4, mul_4, x_3, pow_6, sub_5, mul_5, x_4, pow_7, sub_6, mul_6, x_5, pow_8, sub_7, mul_7, enhance_image], Original ATen: [aten.pow, aten.sub, aten.mul, aten.add]
triton_poi_fused_add_mul_pow_sub_3.run(primals_3, buf13, buf14, buf15, buf16, 49152, grid=grid(49152), stream=stream0)
buf17 = empty_strided_cuda((4, 24, 64, 64), (98304, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [r], Original ATen: [aten.cat]
triton_poi_fused_cat_4.run(buf13, buf17, 393216, grid=grid(393216), stream=stream0)
buf18 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_5, x6], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_5.run(buf10, primals_13, buf18, 524288, grid=grid(524288), stream=stream0)
del buf10
del primals_13
buf19 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_4, x5], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_5.run(buf8, primals_11, buf19, 524288, grid=grid(524288), stream=stream0)
del buf8
del primals_11
buf20 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, x4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_5.run(buf6, primals_9, buf20, 524288, grid=grid(524288), stream=stream0)
del buf6
del primals_9
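    # The reinterpret_tensor calls below expose the eight 3-channel splits of buf13 (x_r)
    # as zero-copy views for the backward graph.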
return (buf15, buf16, buf17, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf1, buf3, buf5, buf7, buf9, buf11, buf13, reinterpret_tensor(buf13, (4, 3, 64, 64), (98304, 4096, 64, 1), 12288), reinterpret_tensor(buf13, (4, 3, 64, 64), (98304, 4096, 64, 1), 24576), reinterpret_tensor(buf13, (4, 3, 64, 64), (98304, 4096, 64, 1), 36864), reinterpret_tensor(buf13, (4, 3, 64, 64), (98304, 4096, 64, 1), 49152), reinterpret_tensor(buf13, (4, 3, 64, 64), (98304, 4096, 64, 1), 61440), reinterpret_tensor(buf13, (4, 3, 64, 64), (98304, 4096, 64, 1), 73728), reinterpret_tensor(buf13, (4, 3, 64, 64), (98304, 4096, 64, 1), 86016), buf14, buf15, buf18, buf19, buf20, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((24, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
class enhance_net_nopool(nn.Module):
def __init__(self):
super(enhance_net_nopool, self).__init__()
self.relu = nn.ReLU(inplace=True)
number_f = 32
self.e_conv1 = nn.Conv2d(3, number_f, 3, 1, 1, bias=True)
self.e_conv2 = nn.Conv2d(number_f, number_f, 3, 1, 1, bias=True)
self.e_conv3 = nn.Conv2d(number_f, number_f, 3, 1, 1, bias=True)
self.e_conv4 = nn.Conv2d(number_f, number_f, 3, 1, 1, bias=True)
self.e_conv5 = nn.Conv2d(number_f * 2, number_f, 3, 1, 1, bias=True)
self.e_conv6 = nn.Conv2d(number_f * 2, number_f, 3, 1, 1, bias=True)
self.e_conv7 = nn.Conv2d(number_f * 2, 24, 3, 1, 1, bias=True)
self.maxpool = nn.MaxPool2d(2, stride=2, return_indices=False,
ceil_mode=False)
self.upsample = nn.UpsamplingBilinear2d(scale_factor=2)
def forward(self, x):
x1 = self.relu(self.e_conv1(x))
x2 = self.relu(self.e_conv2(x1))
x3 = self.relu(self.e_conv3(x2))
x4 = self.relu(self.e_conv4(x3))
x5 = self.relu(self.e_conv5(torch.cat([x3, x4], 1)))
x6 = self.relu(self.e_conv6(torch.cat([x2, x5], 1)))
        x_r = torch.tanh(self.e_conv7(torch.cat([x1, x6], 1)))
r1, r2, r3, r4, r5, r6, r7, r8 = torch.split(x_r, 3, dim=1)
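        # Each r_i is a 3-channel curve map; one LE step LE(x) = x + r_i * (x^2 - x) is applied per map.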
x = x + r1 * (torch.pow(x, 2) - x)
x = x + r2 * (torch.pow(x, 2) - x)
x = x + r3 * (torch.pow(x, 2) - x)
enhance_image_1 = x + r4 * (torch.pow(x, 2) - x)
x = enhance_image_1 + r5 * (torch.pow(enhance_image_1, 2) -
enhance_image_1)
x = x + r6 * (torch.pow(x, 2) - x)
x = x + r7 * (torch.pow(x, 2) - x)
enhance_image = x + r8 * (torch.pow(x, 2) - x)
r = torch.cat([r1, r2, r3, r4, r5, r6, r7, r8], 1)
return enhance_image_1, enhance_image, r
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 64
x0 = xindex % 4096
x2 = xindex // 262144
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-32 + x1), tmp6, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp6, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_tanh_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 24
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x3, tmp3, None)
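# LE-curve kernel: applies x <- x + r_k * (x^2 - x) eight times in registers and writes out
# the intermediate images after steps 1, 4, and 8.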
@triton.jit
def triton_poi_fused_add_mul_pow_sub_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 12288
x1 = xindex // 12288
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + (x0 + 98304 * x1), None)
tmp6 = tl.load(in_ptr1 + (12288 + x0 + 98304 * x1), None)
tmp11 = tl.load(in_ptr1 + (24576 + x0 + 98304 * x1), None)
tmp16 = tl.load(in_ptr1 + (36864 + x0 + 98304 * x1), None)
tmp21 = tl.load(in_ptr1 + (49152 + x0 + 98304 * x1), None)
tmp26 = tl.load(in_ptr1 + (61440 + x0 + 98304 * x1), None)
tmp31 = tl.load(in_ptr1 + (73728 + x0 + 98304 * x1), None)
tmp36 = tl.load(in_ptr1 + (86016 + x0 + 98304 * x1), None)
tmp2 = tmp0 * tmp0
tmp3 = tmp2 - tmp0
tmp4 = tmp1 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 * tmp5
tmp8 = tmp7 - tmp5
tmp9 = tmp6 * tmp8
tmp10 = tmp5 + tmp9
tmp12 = tmp10 * tmp10
tmp13 = tmp12 - tmp10
tmp14 = tmp11 * tmp13
tmp15 = tmp10 + tmp14
tmp17 = tmp15 * tmp15
tmp18 = tmp17 - tmp15
tmp19 = tmp16 * tmp18
tmp20 = tmp15 + tmp19
tmp22 = tmp20 * tmp20
tmp23 = tmp22 - tmp20
tmp24 = tmp21 * tmp23
tmp25 = tmp20 + tmp24
tmp27 = tmp25 * tmp25
tmp28 = tmp27 - tmp25
tmp29 = tmp26 * tmp28
tmp30 = tmp25 + tmp29
tmp32 = tmp30 * tmp30
tmp33 = tmp32 - tmp30
tmp34 = tmp31 * tmp33
tmp35 = tmp30 + tmp34
tmp37 = tmp35 * tmp35
tmp38 = tmp37 - tmp35
tmp39 = tmp36 * tmp38
tmp40 = tmp35 + tmp39
tl.store(out_ptr0 + x2, tmp5, None)
tl.store(out_ptr1 + x2, tmp20, None)
tl.store(out_ptr2 + x2, tmp40, None)
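# Re-concatenation of the eight contiguous splits of x_r; the shifted offsets cancel, making
# this an elementwise copy of the 24-channel tensor.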
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 24
x0 = xindex % 4096
x2 = xindex // 98304
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 98304 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 6, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (12288 + x0 + 4096 * (-3 + x1) + 98304 * x2),
tmp9, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 9, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (24576 + x0 + 4096 * (-6 + x1) + 98304 * x2),
tmp14, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 12, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr0 + (36864 + x0 + 4096 * (-9 + x1) + 98304 * x2),
tmp19, other=0.0)
tmp21 = tmp0 >= tmp17
tmp22 = tl.full([1], 15, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tmp21 & tmp23
tmp25 = tl.load(in_ptr0 + (49152 + x0 + 4096 * (-12 + x1) + 98304 * x2),
tmp24, other=0.0)
tmp26 = tmp0 >= tmp22
tmp27 = tl.full([1], 18, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr0 + (61440 + x0 + 4096 * (-15 + x1) + 98304 * x2),
tmp29, other=0.0)
tmp31 = tmp0 >= tmp27
tmp32 = tl.full([1], 21, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr0 + (73728 + x0 + 4096 * (-18 + x1) + 98304 * x2),
tmp34, other=0.0)
tmp36 = tmp0 >= tmp32
tl.full([1], 24, tl.int64)
tmp39 = tl.load(in_ptr0 + (86016 + x0 + 4096 * (-21 + x1) + 98304 * x2),
tmp36, other=0.0)
tmp40 = tl.where(tmp34, tmp35, tmp39)
tmp41 = tl.where(tmp29, tmp30, tmp40)
tmp42 = tl.where(tmp24, tmp25, tmp41)
tmp43 = tl.where(tmp19, tmp20, tmp42)
tmp44 = tl.where(tmp14, tmp15, tmp43)
tmp45 = tl.where(tmp9, tmp10, tmp44)
tmp46 = tl.where(tmp4, tmp5, tmp45)
tl.store(out_ptr0 + x3, tmp46, None)
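# Stores the (relu output <= 0) mask consumed by threshold_backward during autograd.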
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (32,), (1,))
assert_size_stride(primals_12, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_13, (32,), (1,))
assert_size_stride(primals_14, (24, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (24,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(524288)](buf3, primals_5,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(524288)](buf5, primals_7,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_1[grid(1048576)](buf5, buf6, primals_9, buf7,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf9 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_1[grid(1048576)](buf3, buf8, primals_11, buf9,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf11 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_1[grid(1048576)](buf1, buf10, primals_13,
buf11, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 24, 64, 64), (98304, 4096, 64, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_tanh_2[grid(393216)](buf13, primals_15,
393216, XBLOCK=512, num_warps=8, num_stages=1)
del primals_15
buf14 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
buf15 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
buf16 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
triton_poi_fused_add_mul_pow_sub_3[grid(49152)](primals_3, buf13,
buf14, buf15, buf16, 49152, XBLOCK=256, num_warps=4, num_stages=1)
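        # buf14, buf15, buf16 hold the images after curve steps 1, 4, and 8; buf15 and
        # buf16 are returned as enhance_image_1 and enhance_image.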
buf17 = empty_strided_cuda((4, 24, 64, 64), (98304, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_4[grid(393216)](buf13, buf17, 393216, XBLOCK=
1024, num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_5[grid(524288)](
buf10, primals_13, buf18, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf10
del primals_13
buf19 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_5[grid(524288)](
buf8, primals_11, buf19, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf8
del primals_11
buf20 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_5[grid(524288)](
buf6, primals_9, buf20, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf6
del primals_9
return (buf15, buf16, buf17, primals_1, primals_3, primals_4, primals_6,
primals_8, primals_10, primals_12, primals_14, buf1, buf3, buf5,
buf7, buf9, buf11, buf13, reinterpret_tensor(buf13, (4, 3, 64, 64),
(98304, 4096, 64, 1), 12288), reinterpret_tensor(buf13, (4, 3, 64,
64), (98304, 4096, 64, 1), 24576), reinterpret_tensor(buf13, (4, 3,
64, 64), (98304, 4096, 64, 1), 36864), reinterpret_tensor(buf13, (4,
3, 64, 64), (98304, 4096, 64, 1), 49152), reinterpret_tensor(buf13,
(4, 3, 64, 64), (98304, 4096, 64, 1), 61440), reinterpret_tensor(
buf13, (4, 3, 64, 64), (98304, 4096, 64, 1), 73728),
reinterpret_tensor(buf13, (4, 3, 64, 64), (98304, 4096, 64, 1),
86016), buf14, buf15, buf18, buf19, buf20)
class enhance_net_nopoolNew(nn.Module):
def __init__(self):
super(enhance_net_nopoolNew, self).__init__()
self.relu = nn.ReLU(inplace=True)
number_f = 32
self.e_conv1 = nn.Conv2d(3, number_f, 3, 1, 1, bias=True)
self.e_conv2 = nn.Conv2d(number_f, number_f, 3, 1, 1, bias=True)
self.e_conv3 = nn.Conv2d(number_f, number_f, 3, 1, 1, bias=True)
self.e_conv4 = nn.Conv2d(number_f, number_f, 3, 1, 1, bias=True)
self.e_conv5 = nn.Conv2d(number_f * 2, number_f, 3, 1, 1, bias=True)
self.e_conv6 = nn.Conv2d(number_f * 2, number_f, 3, 1, 1, bias=True)
self.e_conv7 = nn.Conv2d(number_f * 2, 24, 3, 1, 1, bias=True)
self.maxpool = nn.MaxPool2d(2, stride=2, return_indices=False,
ceil_mode=False)
self.upsample = nn.UpsamplingBilinear2d(scale_factor=2)
def forward(self, input_0):
primals_1 = self.e_conv1.weight
primals_2 = self.e_conv1.bias
primals_4 = self.e_conv2.weight
primals_5 = self.e_conv2.bias
primals_6 = self.e_conv3.weight
primals_7 = self.e_conv3.bias
primals_8 = self.e_conv4.weight
primals_9 = self.e_conv4.bias
primals_10 = self.e_conv5.weight
primals_11 = self.e_conv5.bias
primals_12 = self.e_conv6.weight
primals_13 = self.e_conv6.bias
primals_14 = self.e_conv7.weight
primals_15 = self.e_conv7.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0], output[1], output[2]
| farhantandia/Applied-CV-Zero-DCE-master | enhance_net_nopool | false | 6,704 | [
"MIT"
] | 1 | 56a0f8aec799eb5d125f5d9f44f692b9a9a3c990 | https://github.com/farhantandia/Applied-CV-Zero-DCE-master/tree/56a0f8aec799eb5d125f5d9f44f692b9a9a3c990 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
class Model(nn.Module):
def __init__(self):
super().__init__()
self.relu = nn.ReLU(inplace=True)
number_f = 32
self.e_conv1 = nn.Conv2d(3, number_f, 3, 1, 1, bias=True)
self.e_conv2 = nn.Conv2d(number_f, number_f, 3, 1, 1, bias=True)
self.e_conv3 = nn.Conv2d(number_f, number_f, 3, 1, 1, bias=True)
self.e_conv4 = nn.Conv2d(number_f, number_f, 3, 1, 1, bias=True)
self.e_conv5 = nn.Conv2d(number_f * 2, number_f, 3, 1, 1, bias=True)
self.e_conv6 = nn.Conv2d(number_f * 2, number_f, 3, 1, 1, bias=True)
self.e_conv7 = nn.Conv2d(number_f * 2, 24, 3, 1, 1, bias=True)
self.maxpool = nn.MaxPool2d(2, stride=2, return_indices=False,
ceil_mode=False)
self.upsample = nn.UpsamplingBilinear2d(scale_factor=2)
def forward(self, x):
x1 = self.relu(self.e_conv1(x))
x2 = self.relu(self.e_conv2(x1))
x3 = self.relu(self.e_conv3(x2))
x4 = self.relu(self.e_conv4(x3))
x5 = self.relu(self.e_conv5(torch.cat([x3, x4], 1)))
x6 = self.relu(self.e_conv6(torch.cat([x2, x5], 1)))
        x_r = torch.tanh(self.e_conv7(torch.cat([x1, x6], 1)))
r1, r2, r3, r4, r5, r6, r7, r8 = torch.split(x_r, 3, dim=1)
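        # Apply the quadratic enhancement curve x + r * (x^2 - x) eight times, one r-map per step.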
x = x + r1 * (torch.pow(x, 2) - x)
x = x + r2 * (torch.pow(x, 2) - x)
x = x + r3 * (torch.pow(x, 2) - x)
enhance_image_1 = x + r4 * (torch.pow(x, 2) - x)
x = enhance_image_1 + r5 * (torch.pow(enhance_image_1, 2) -
enhance_image_1)
x = x + r6 * (torch.pow(x, 2) - x)
x = x + r7 * (torch.pow(x, 2) - x)
enhance_image = x + r8 * (torch.pow(x, 2) - x)
r = torch.cat([r1, r2, r3, r4, r5, r6, r7, r8], 1)
return enhance_image_1, enhance_image, r
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
ActorCriticContinuous | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/do/cdo22no4lmipk7byduyah2xsadvdcbfr22puoptl5br3l66r6jra.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
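# Fuses the bias add with LeakyReLU(negative_slope=0.01); the boolean x > 0 mask is also
# stored so the backward pass can reuse it.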
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bt/cbtvgwg2q2cbim24ouk4jhhk3xdk5o76hlakhhwzvjmkxuoy3vjv.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.01), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_3, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/te/cterrdmunrbsbsiujkukretmu2dvprfqhejymivorqkzug35erzt.py
# Topologically Sorted Source Nodes: [add, action_sigma], Original ATen: [aten.add, aten.softplus]
# Source node to ATen node mapping:
# action_sigma => exp, gt_2, log1p, where_2
# add => add
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_9, 0.0001), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%add,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 20), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %add, %log1p), kwargs = {})
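# Numerically stable softplus with the default threshold of 20: above it, exp would
# overflow fp32 and log1p(exp(x)) ~= x, so the input passes through unchanged. The linear
# output is offset by 1e-4 before the softplus.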
triton_poi_fused_add_softplus_2 = async_compile.triton('triton_poi_fused_add_softplus_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_softplus_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_softplus_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0001
tmp2 = tmp0 + tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 4), (4, 1))
assert_size_stride(primals_5, (2, ), (1, ))
assert_size_stride(primals_6, (1, 2), (2, 1))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (4, 2), (2, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 2), (2, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 128, grid=grid(128), stream=stream0)
del buf3
del primals_5
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [state_value], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 2), (2, 1), 0), reinterpret_tensor(primals_6, (2, 1), (1, 2), 0), alpha=1, beta=1, out=buf7)
del primals_7
buf8 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [action_mean], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 2), (2, 1), 0), reinterpret_tensor(primals_8, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf8)
del primals_9
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf5, (64, 2), (2, 1), 0), reinterpret_tensor(primals_10, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf9)
del primals_11
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, action_sigma], Original ATen: [aten.add, aten.softplus]
triton_poi_fused_add_softplus_2.run(buf9, buf10, 256, grid=grid(256), stream=stream0)
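    # The first three entries are the module outputs (action_mean reshaped back to
    # (4, 4, 4, 4), action_sigma, state_value); the remaining tensors are returned
    # so autograd can reuse them in the backward pass.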
return (reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf10, reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf5, (64, 2), (2, 1), 0), buf9, primals_10, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class ActorCriticContinuous(nn.Module):
"""
Actor-Critic for continuous action spaces. The network returns a state_value (critic) and
    action mean and action standard deviation (actor). The action is then sampled from a normal
distribution with mean and std given by the actor.
"""
def __init__(self, action_dim, state_dim, hidden_dim):
super(ActorCriticContinuous, self).__init__()
self.fc_1 = nn.Linear(state_dim, hidden_dim)
self.fc_2 = nn.Linear(hidden_dim, int(hidden_dim / 2))
self.critic_head = nn.Linear(int(hidden_dim / 2), 1)
self.actor_head_mean = nn.Linear(int(hidden_dim / 2), action_dim)
self.actor_head_sigma = nn.Linear(int(hidden_dim / 2), action_dim)
def forward(self, inp):
x = F.leaky_relu(self.fc_1(inp))
x = F.leaky_relu(self.fc_2(x))
state_value = self.critic_head(x)
action_mean = self.actor_head_mean(x)
action_sigma = F.softplus(self.actor_head_sigma(x) + 0.0001)
return action_mean, action_sigma, state_value
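# A sketch of how these outputs are typically consumed (not part of the original
# module): dist = torch.distributions.Normal(action_mean, action_sigma), then
# action = dist.sample() and log_prob = dist.log_prob(action).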
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'action_dim': 4, 'state_dim': 4, 'hidden_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_softplus_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0001
tmp2 = tmp0 + tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 4), (4, 1))
assert_size_stride(primals_5, (2,), (1,))
assert_size_stride(primals_6, (1, 2), (2, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (4, 2), (2, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 2), (2, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(128)](buf3, primals_5, buf4,
buf5, 128, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_5
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 2), (
2, 1), 0), reinterpret_tensor(primals_6, (2, 1), (1, 2), 0),
alpha=1, beta=1, out=buf7)
del primals_7
buf8 = buf0
del buf0
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 2), (
2, 1), 0), reinterpret_tensor(primals_8, (2, 4), (1, 2), 0),
alpha=1, beta=1, out=buf8)
del primals_9
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf5, (64, 2),
(2, 1), 0), reinterpret_tensor(primals_10, (2, 4), (1, 2), 0),
alpha=1, beta=1, out=buf9)
del primals_11
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_softplus_2[grid(256)](buf9, buf10, 256, XBLOCK
=256, num_warps=4, num_stages=1)
return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf10, reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 2), (2, 1), 0
), buf9, primals_10, primals_8, primals_6, primals_4
class ActorCriticContinuousNew(nn.Module):
"""
Actor-Critic for continuous action spaces. The network returns a state_value (critic) and
    action mean and action standard deviation (actor). The action is then sampled from a normal
distribution with mean and std given by the actor.
"""
def __init__(self, action_dim, state_dim, hidden_dim):
super(ActorCriticContinuousNew, self).__init__()
self.fc_1 = nn.Linear(state_dim, hidden_dim)
self.fc_2 = nn.Linear(hidden_dim, int(hidden_dim / 2))
self.critic_head = nn.Linear(int(hidden_dim / 2), 1)
self.actor_head_mean = nn.Linear(int(hidden_dim / 2), action_dim)
self.actor_head_sigma = nn.Linear(int(hidden_dim / 2), action_dim)
def forward(self, input_0):
primals_1 = self.fc_1.weight
primals_2 = self.fc_1.bias
primals_4 = self.fc_2.weight
primals_5 = self.fc_2.bias
primals_6 = self.critic_head.weight
primals_7 = self.critic_head.bias
primals_8 = self.actor_head_mean.weight
primals_9 = self.actor_head_mean.bias
primals_10 = self.actor_head_sigma.weight
primals_11 = self.actor_head_sigma.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1], output[2]
| fschur/Advantage-Actor-Critic-for-OpenAi-gym | ActorCriticContinuous | false | 6,705 | [
"MIT"
] | 1 | c130038789425301684825e09e77f17e89d21859 | https://github.com/fschur/Advantage-Actor-Critic-for-OpenAi-gym/tree/c130038789425301684825e09e77f17e89d21859 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
"""
Actor-Critic for continuous action spaces. The network returns a state_value (critic) and
    action mean and action standard deviation (actor). The action is then sampled from a normal
distribution with mean and std given by the actor.
"""
def __init__(self, action_dim, state_dim, hidden_dim):
super().__init__()
self.fc_1 = nn.Linear(state_dim, hidden_dim)
self.fc_2 = nn.Linear(hidden_dim, int(hidden_dim / 2))
self.critic_head = nn.Linear(int(hidden_dim / 2), 1)
self.actor_head_mean = nn.Linear(int(hidden_dim / 2), action_dim)
self.actor_head_sigma = nn.Linear(int(hidden_dim / 2), action_dim)
def forward(self, inp):
x = F.leaky_relu(self.fc_1(inp))
x = F.leaky_relu(self.fc_2(x))
state_value = self.critic_head(x)
action_mean = self.actor_head_mean(x)
action_sigma = F.softplus(self.actor_head_sigma(x) + 0.0001)
return action_mean, action_sigma, state_value
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Policy | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wj/cwjhqtccibtjmvk6idu2u5cqmpyduromw4a6pzioh3adfwjeh5mj.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 23104
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/il/cil55xjuwkqyzxjlhvd4kizvr326ffq3shsi7xhiipir6ovlmzzy.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_3, %primals_4, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 5184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 81) % 16
x2 = (xindex // 1296)
x3 = xindex % 1296
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + (1312*x2)), tmp4, xmask)
tl.store(out_ptr1 + (x3 + (1408*x2)), tmp6, xmask)
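    # Both outputs are written with padded batch strides (1312 and 1408 floats
    # rather than the contiguous 1296), which Inductor's layout planner appears
    # to round up for alignment; out_ptr1 stores the relu(x) <= 0 mask consumed
    # by threshold_backward.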
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2b/c2bujjyeji7nhf4gfgxav4unhmpugynzwx2v63uhk7lp4nn5exsa.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_6), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rh/crhvyy3w3uejbzndu7qftnyc25sndrfzlmb3i2bzpyadobz7z7bm.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_8), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 6, 6), (72, 36, 6, 1))
assert_size_stride(primals_2, (4, 2, 81, 81), (13122, 6561, 81, 1))
assert_size_stride(primals_3, (16, 4, 6, 6), (144, 36, 6, 1))
assert_size_stride(primals_4, (16, ), (1, ))
assert_size_stride(primals_5, (256, 1296), (1296, 1))
assert_size_stride(primals_6, (256, ), (1, ))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 38, 38), (5776, 1444, 38, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, 23104, grid=grid(23104), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 9, 9), (1296, 81, 9, 1))
buf3 = empty_strided_cuda((4, 16, 9, 9), (1312, 81, 9, 1), torch.float32)
buf8 = empty_strided_cuda((4, 16, 9, 9), (1408, 81, 9, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf2, primals_4, buf3, buf8, 5184, grid=grid(5184), stream=stream0)
del buf2
del primals_4
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (4, 1296), (1312, 1), 0), reinterpret_tensor(primals_5, (1296, 256), (1, 1296), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf5, primals_6, 1024, grid=grid(1024), stream=stream0)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (256, 1), (1, 256), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_3.run(buf7, primals_8, 4, grid=grid(4), stream=stream0)
del primals_8
return (buf7, primals_1, primals_2, primals_3, buf1, reinterpret_tensor(buf3, (4, 1296), (1312, 1), 0), buf5, buf7, primals_7, primals_5, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 2, 6, 6), (72, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 2, 81, 81), (13122, 6561, 81, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, 1296), (1296, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch import nn
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.conv1 = nn.Conv2d(2, 4, kernel_size=6, stride=2, bias=False)
self.conv2 = nn.Conv2d(4, 16, kernel_size=6, stride=4)
self.size = 9 * 9 * 16
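        # 81x81 input -> conv1 (k=6, s=2): floor((81 - 6) / 2) + 1 = 38 ->
        # conv2 (k=6, s=4): floor((38 - 6) / 4) + 1 = 9; with 16 channels the
        # flattened feature size is 9 * 9 * 16 = 1296.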
self.fc1 = nn.Linear(self.size, 256)
self.fc2 = nn.Linear(256, 1)
self.sig = nn.Sigmoid()
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = x.view(-1, self.size)
x = F.relu(self.fc1(x))
return self.sig(self.fc2(x))
def get_inputs():
return [torch.rand([4, 2, 81, 81])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 23104
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 5184
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 81 % 16
x2 = xindex // 1296
x3 = xindex % 1296
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1312 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1408 * x2), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
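    # fc2 has a single output unit, so its bias is a lone scalar: it is loaded
    # once at offset 0 and broadcast across the whole block.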
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 2, 6, 6), (72, 36, 6, 1))
assert_size_stride(primals_2, (4, 2, 81, 81), (13122, 6561, 81, 1))
assert_size_stride(primals_3, (16, 4, 6, 6), (144, 36, 6, 1))
assert_size_stride(primals_4, (16,), (1,))
assert_size_stride(primals_5, (256, 1296), (1296, 1))
assert_size_stride(primals_6, (256,), (1,))
assert_size_stride(primals_7, (1, 256), (256, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 38, 38), (5776, 1444, 38, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(23104)](buf1, 23104, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(4, 4),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 9, 9), (1296, 81, 9, 1))
buf3 = empty_strided_cuda((4, 16, 9, 9), (1312, 81, 9, 1), torch.
float32)
buf8 = empty_strided_cuda((4, 16, 9, 9), (1408, 81, 9, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(5184)](buf2
, primals_4, buf3, buf8, 5184, XBLOCK=256, num_warps=4,
num_stages=1)
del buf2
del primals_4
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (4, 1296), (1312, 1), 0),
reinterpret_tensor(primals_5, (1296, 256), (1, 1296), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_relu_2[grid(1024)](buf5, primals_6, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_6
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (256, 1), (1,
256), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_sigmoid_3[grid(4)](buf7, primals_8, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_8
return buf7, primals_1, primals_2, primals_3, buf1, reinterpret_tensor(buf3
, (4, 1296), (1312, 1), 0), buf5, buf7, primals_7, primals_5, buf8
class PolicyNew(nn.Module):
def __init__(self):
super(PolicyNew, self).__init__()
self.conv1 = nn.Conv2d(2, 4, kernel_size=6, stride=2, bias=False)
self.conv2 = nn.Conv2d(4, 16, kernel_size=6, stride=4)
self.size = 9 * 9 * 16
self.fc1 = nn.Linear(self.size, 256)
self.fc2 = nn.Linear(256, 1)
self.sig = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv2.bias
primals_5 = self.fc1.weight
primals_6 = self.fc1.bias
primals_7 = self.fc2.weight
primals_8 = self.fc2.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| francescotorregrossa/deep-reinforcement-learning-nanodegree | Policy | false | 6,706 | [
"MIT"
] | 1 | 396648570aa53c9e727a8de69175e4a139d4ded5 | https://github.com/francescotorregrossa/deep-reinforcement-learning-nanodegree/tree/396648570aa53c9e727a8de69175e4a139d4ded5 | import torch
import torch.nn.functional as F
from torch import nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(2, 4, kernel_size=6, stride=2, bias=False)
self.conv2 = nn.Conv2d(4, 16, kernel_size=6, stride=4)
self.size = 9 * 9 * 16
self.fc1 = nn.Linear(self.size, 256)
self.fc2 = nn.Linear(256, 1)
self.sig = nn.Sigmoid()
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = x.view(-1, self.size)
x = F.relu(self.fc1(x))
return self.sig(self.fc2(x))
def get_inputs():
return [torch.rand([4, 2, 81, 81])]
def get_init_inputs():
return []
|
EqualConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/mr/cmrzofxtfa5fe3ax4o3n5qvgpvhbgcrspjauzarmp4t443npav4h.py
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# weight => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.1767766952966369), kwargs = {})
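# 0.1767766952966369 = sqrt(2 / fan_in), where fan_in = in_channels * kH * kW =
# 4 * 4 * 4 = 64; Inductor has folded EqualLR's runtime weight rescale into a
# compile-time constant.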
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.1767766952966369
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %mul, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
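    # The convolution consumes buf0, the pre-scaled weight, so the mul kernel
    # above realizes EqualLR's forward-time rescaling before the extern
    # convolution runs.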
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf2, buf0, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from math import sqrt
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
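# EqualLR implements the equalized learning-rate trick used in Progressive
# GAN-style models: the raw weight is stored as `weight_orig` (drawn from
# N(0, 1)) and rescaled by sqrt(2 / fan_in) on every forward pass via a
# forward-pre hook, rather than baking the scale into the initialization.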
class EqualConv2d(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input):
return self.conv(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.1767766952966369
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, buf0, primals_3, buf0
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualConv2dNew(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input_0):
primals_2 = self.conv.bias
primals_1 = self.conv.weight_orig
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| g33sean/RTIL | EqualConv2d | false | 6,707 | [
"BSD-2-Clause",
"MIT"
] | 1 | 5325f6d5e3ddf7579b6bd8199898e00eff3da631 | https://github.com/g33sean/RTIL/tree/5325f6d5e3ddf7579b6bd8199898e00eff3da631 | import torch
from torch import nn
from math import sqrt
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class Model(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input):
return self.conv(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Feedforward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/jq/cjqaq2meov3vkcgfealq7w4w35tw2oemvmhneuxmigeoumva22p7.py
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# y_1 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class Feedforward(torch.nn.Module):
def __init__(self, input_size, output_size, hidden_size):
super(Feedforward, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)
self.relu = torch.nn.ReLU()
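        # Note: self.relu is defined here but never used; forward() applies the
        # sigmoid activation after fc1 and fc2 instead.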
self.fc2 = torch.nn.Linear(self.hidden_size, self.hidden_size)
self.fc3 = torch.nn.Linear(self.hidden_size, self.output_size)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, x):
y = self.fc1(x)
y = self.sigmoid(y)
y = self.fc2(y)
y = self.sigmoid(y)
y = self.fc3(y)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
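# A minimal eager-mode reference for the fused kernel above (an illustrative
# sketch only; the helper name is hypothetical and not part of the generated
# module). The kernel adds the broadcast bias to the matmul output and applies
# the sigmoid in a single in-place pointwise pass.
def _sigmoid_bias_reference(x, bias):
    # x: (64, 4) matmul output; bias: (4,), broadcast over the last dimension
    return torch.sigmoid(x + bias)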
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(256)](buf1, primals_2, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_sigmoid_0[grid(256)](buf3, primals_5, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_6, primals_4
class FeedforwardNew(torch.nn.Module):
def __init__(self, input_size, output_size, hidden_size):
super(FeedforwardNew, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(self.hidden_size, self.hidden_size)
self.fc3 = torch.nn.Linear(self.hidden_size, self.output_size)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
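# Example usage of the compiled wrapper (a hedged sketch; assumes a CUDA
# device, since the generated kernels are CUDA-only):
#
#     model = FeedforwardNew(input_size=4, output_size=4, hidden_size=4).cuda()
#     out = model(torch.rand(4, 4, 4, 4, device='cuda'))  # shape (4, 4, 4, 4)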
| fywu85/eecs206a_project | Feedforward | false | 6,708 | [
"MIT"
] | 1 | 73ea518779da4d187df8bbe4cbe46bca6d1a0714 | https://github.com/fywu85/eecs206a_project/tree/73ea518779da4d187df8bbe4cbe46bca6d1a0714 | import torch
class Model(torch.nn.Module):
def __init__(self, input_size, output_size, hidden_size):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(self.hidden_size, self.hidden_size)
self.fc3 = torch.nn.Linear(self.hidden_size, self.output_size)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, x):
y = self.fc1(x)
y = self.sigmoid(y)
y = self.fc2(y)
y = self.sigmoid(y)
y = self.fc3(y)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
ActorCriticDiscrete | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/do/cdo22no4lmipk7byduyah2xsadvdcbfr22puoptl5br3l66r6jra.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
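# Note: the kernel above fuses the linear layer's bias add with
# LeakyReLU(negative_slope=0.01), the F.leaky_relu default, and writes two
# outputs: the boolean mask (x > 0), saved for the backward pass, and the
# activated value where(x > 0, x, 0.01 * x).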
# kernel path: runs/run_shard_4/inductor_cache/bt/cbtvgwg2q2cbim24ouk4jhhk3xdk5o76hlakhhwzvjmkxuoy3vjv.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.01), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_3, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/tt/cttmvktt3m2x2nl56afa7l3abaxt7wlehowakdzngkhgs35f3n7u.py
# Topologically Sorted Source Nodes: [action_prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# action_prob => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_7, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_7, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ry/cryn7ntc2gpkbfzbre3xh7lffx7zkbskw6oihbzsekkgajmdbki6.py
# Topologically Sorted Source Nodes: [action_prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# action_prob => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
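# Note: softmax over the last dimension (size 4) is split into two pointwise
# passes: triton_poi_fused__softmax_2 computes exp(x - rowmax) for numerical
# stability, and triton_poi_fused__softmax_3 normalizes by the row sum;
# together they match torch.softmax(x, dim=-1).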
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 4), (4, 1))
assert_size_stride(primals_5, (2, ), (1, ))
assert_size_stride(primals_6, (1, 2), (2, 1))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (4, 2), (2, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 128, grid=grid(128), stream=stream0)
del buf3
del primals_5
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [state_value], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 2), (2, 1), 0), reinterpret_tensor(primals_6, (2, 1), (1, 2), 0), alpha=1, beta=1, out=buf7)
del primals_7
buf8 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 2), (2, 1), 0), reinterpret_tensor(primals_8, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf8)
del primals_9
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [action_prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf8, buf9, 256, grid=grid(256), stream=stream0)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [action_prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf9, buf10, 256, grid=grid(256), stream=stream0)
del buf9
return (buf10, reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf5, (64, 2), (2, 1), 0), buf10, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class ActorCriticDiscrete(nn.Module):
"""
    Actor-Critic for discrete action spaces. The network returns a state_value (critic) and action probabilities (actor).
"""
def __init__(self, action_dim, state_dim, hidden_dim):
super(ActorCriticDiscrete, self).__init__()
self.fc_1 = nn.Linear(state_dim, hidden_dim)
self.fc_2 = nn.Linear(hidden_dim, int(hidden_dim / 2))
self.critic_head = nn.Linear(int(hidden_dim / 2), 1)
self.actor_head = nn.Linear(int(hidden_dim / 2), action_dim)
def forward(self, inp):
x = F.leaky_relu(self.fc_1(inp))
x = F.leaky_relu(self.fc_2(x))
state_value = self.critic_head(x)
action_prob = F.softmax(self.actor_head(x), dim=-1)
return action_prob, state_value
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'action_dim': 4, 'state_dim': 4, 'hidden_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 4), (4, 1))
assert_size_stride(primals_5, (2,), (1,))
assert_size_stride(primals_6, (1, 2), (2, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (4, 2), (2, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(128)](buf3, primals_5, buf4,
buf5, 128, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_5
buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf5, (64, 2), (
2, 1), 0), reinterpret_tensor(primals_6, (2, 1), (1, 2), 0),
alpha=1, beta=1, out=buf7)
del primals_7
buf8 = buf0
del buf0
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 2), (
2, 1), 0), reinterpret_tensor(primals_8, (2, 4), (1, 2), 0),
alpha=1, beta=1, out=buf8)
del primals_9
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf8, buf9, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused__softmax_3[grid(256)](buf9, buf10, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf9
return buf10, reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf5, (64, 2), (2, 1), 0
), buf10, primals_8, primals_6, primals_4
class ActorCriticDiscreteNew(nn.Module):
"""
    Actor-Critic for discrete action spaces. The network returns a state_value (critic) and action probabilities (actor).
"""
def __init__(self, action_dim, state_dim, hidden_dim):
super(ActorCriticDiscreteNew, self).__init__()
self.fc_1 = nn.Linear(state_dim, hidden_dim)
self.fc_2 = nn.Linear(hidden_dim, int(hidden_dim / 2))
self.critic_head = nn.Linear(int(hidden_dim / 2), 1)
self.actor_head = nn.Linear(int(hidden_dim / 2), action_dim)
def forward(self, input_0):
primals_1 = self.fc_1.weight
primals_2 = self.fc_1.bias
primals_4 = self.fc_2.weight
primals_5 = self.fc_2.bias
primals_6 = self.critic_head.weight
primals_7 = self.critic_head.bias
primals_8 = self.actor_head.weight
primals_9 = self.actor_head.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
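# Example usage (a hedged sketch; assumes a CUDA device, since call() pins
# device 0):
#
#     net = ActorCriticDiscreteNew(action_dim=4, state_dim=4, hidden_dim=4).cuda()
#     action_prob, state_value = net(torch.rand(4, 4, 4, 4, device='cuda'))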
| fschur/Advantage-Actor-Critic-for-OpenAi-gym | ActorCriticDiscrete | false | 6,709 | [
"MIT"
] | 1 | c130038789425301684825e09e77f17e89d21859 | https://github.com/fschur/Advantage-Actor-Critic-for-OpenAi-gym/tree/c130038789425301684825e09e77f17e89d21859 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
"""
    Actor-Critic for discrete action spaces. The network returns a state_value (critic) and action probabilities (actor).
"""
def __init__(self, action_dim, state_dim, hidden_dim):
super().__init__()
self.fc_1 = nn.Linear(state_dim, hidden_dim)
self.fc_2 = nn.Linear(hidden_dim, int(hidden_dim / 2))
self.critic_head = nn.Linear(int(hidden_dim / 2), 1)
self.actor_head = nn.Linear(int(hidden_dim / 2), action_dim)
def forward(self, inp):
x = F.leaky_relu(self.fc_1(inp))
x = F.leaky_relu(self.fc_2(x))
state_value = self.critic_head(x)
action_prob = F.softmax(self.actor_head(x), dim=-1)
return action_prob, state_value
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
EqualLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/oy/coy4v6ev22tv33nc6asaz3obrskaw2f3vho4q3aj4yqpth7c2y2m.py
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# weight => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.7071067811865476), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
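# Note: the constant 0.7071067811865476 is sqrt(2 / fan_in) with fan_in = 4
# (weight.size(1) * weight[0][0].numel() = 4 * 1), i.e. sqrt(0.5) -- the
# equalized-learning-rate scale that EqualLR applies to the weight on every
# forward pass.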
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf0, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from math import sqrt
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input):
return self.linear(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.7071067811865476
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_2
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf0, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualLinearNew(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input_0):
primals_2 = self.linear.bias
primals_1 = self.linear.weight_orig
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
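# Example usage (a hedged sketch; requires a CUDA device):
#
#     layer = EqualLinearNew(in_dim=4, out_dim=4).cuda()
#     y = layer(torch.rand(4, 4, 4, 4, device='cuda'))  # output shape (4, 4, 4, 4)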
| g33sean/RTIL | EqualLinear | false | 6,710 | [
"BSD-2-Clause",
"MIT"
] | 1 | 5325f6d5e3ddf7579b6bd8199898e00eff3da631 | https://github.com/g33sean/RTIL/tree/5325f6d5e3ddf7579b6bd8199898e00eff3da631 | import torch
from torch import nn
from math import sqrt
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class Model(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
linear = nn.Linear(in_dim, out_dim)
linear.weight.data.normal_()
linear.bias.data.zero_()
self.linear = equal_lr(linear)
def forward(self, input):
return self.linear(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
UNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/jo/cjolh7wy3losq75bea7heuxra52smjn2phczl4xzt2smarbxy3nj.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
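# Note: the convolution itself runs through an extern kernel; the pointwise
# kernel above only adds the conv bias and applies
# LeakyReLU(negative_slope=0.1), storing both the (x > 0) mask for the
# backward pass and the activated tensor.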
# kernel path: runs/run_shard_4/inductor_cache/o4/co4xz3bhphdn2kq3lke3433wpdtqt6r3irqbdr7hp46ou2slvxop.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_1 => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_1, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
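# Note: this kernel implements the 2x2 average pool on a 64x64 feature map:
# each output element averages a 2x2 input window, hence the four loads at
# offsets {0, 1, 64, 65} from base (2*x0 + 128*x1) and the final multiply
# by 0.25.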
# kernel path: runs/run_shard_4/inductor_cache/b2/cb2heynzbbb2idhib26qs23x62rr3vu36ahp3tksyhjfahippc67.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_6, %primals_7, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.1), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xj/cxjbrfzbed7bo2iy4m5zuii5z5cssze6tfcgrk2jehpphz5b77jh.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_4 => avg_pool2d_1
# Graph fragment:
# %avg_pool2d_1 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_3, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_3 = async_compile.triton('triton_poi_fused_avg_pool2d_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/le/cleento7jh4h7b7b25wgw4ax6qfmthojxlfqfgkaohjqgn6pqwco.py
# Topologically Sorted Source Nodes: [conv2d_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_5 => gt_4, mul_4, where_4
# Graph fragment:
# %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_1, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.1), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {})
triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/s5/cs5zukgmdewmnpcrozw2m273bpclzrkypvc2xaub2gmoc5saabvv.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_7 => avg_pool2d_2
# Graph fragment:
# %avg_pool2d_2 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_5, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_5 = async_compile.triton('triton_poi_fused_avg_pool2d_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
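# NOTE (editor): the kernel above is a 2x2, stride-2 average pool over a
# 16x16 plane: it loads the four taps of each window and scales their sum
# by 0.25. An equivalent eager-mode reference via strided slicing (the
# helper is illustrative only and assumes even H and W):
def _ref_avg_pool_2x2(x):
    """x: (N, C, H, W) with H, W even; matches avg_pool2d with kernel 2."""
    return 0.25 * (x[..., 0::2, 0::2] + x[..., 0::2, 1::2]
                   + x[..., 1::2, 0::2] + x[..., 1::2, 1::2])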
# kernel path: runs/run_shard_4/inductor_cache/4u/c4urfqsk2wuyrkcnwy7b2uiwmecrugesubdiuadavwqtcisyhwz4.py
# Topologically Sorted Source Nodes: [conv2d_6, x_8], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_6 => convolution_6
# x_8 => gt_6, mul_6, where_6
# Graph fragment:
# %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_2, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_6 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.1), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_6, %mul_6), kwargs = {})
triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3z/c3zje6b5ccaz3n4winpmxo6y4niaoldocb7ilvkg5sorj2nqvjfa.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_10 => avg_pool2d_3
# Graph fragment:
# %avg_pool2d_3 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_7, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_7 = async_compile.triton('triton_poi_fused_avg_pool2d_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/oy/coyjgpdjbipe737iasihk5ensjtmspgnzblyyy7mrlypqho5vuyg.py
# Topologically Sorted Source Nodes: [conv2d_8, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_8 => convolution_8
# x_11 => gt_8, mul_8, where_8
# Graph fragment:
# %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_3, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_8 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_8, 0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_8, 0.1), kwargs = {})
# %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %convolution_8, %mul_8), kwargs = {})
triton_poi_fused_convolution_leaky_relu_8 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/j6/cj6wkwoaluxhwqnux44qht6o5xye6n3bfqi54esxnpytd6m2qyjn.py
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_13 => avg_pool2d_4
# Graph fragment:
# %avg_pool2d_4 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_9, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_9 = async_compile.triton('triton_poi_fused_avg_pool2d_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/i6/ci6sgepehwucwp2knnf7ujr55xjh7bis2i3kdyon6flrsjhhdhyi.py
# Topologically Sorted Source Nodes: [conv2d_10, x_14], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_10 => convolution_10
# x_14 => gt_10, mul_10, where_10
# Graph fragment:
# %convolution_10 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_4, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_10 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_10, 0), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_10, 0.1), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_10, %convolution_10, %mul_10), kwargs = {})
triton_poi_fused_convolution_leaky_relu_10 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_10(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ep/cepxp5elnxw6qvzcibzdejr6ov2i7hn664ixt6w4vzlrorsdstiq.py
# Topologically Sorted Source Nodes: [conv2d_11, x_15], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_11 => convolution_11
# x_15 => gt_11
# Graph fragment:
# %convolution_11 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_11 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_11, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_11 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
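# NOTE (editor): unlike triton_poi_fused_convolution_leaky_relu_10 above,
# this kernel stores only the boolean mask of conv + bias > 0; the
# activated value itself is rematerialized inside the bilinear-upsample
# kernel that follows, trading a little recomputation for one fewer
# full-size intermediate buffer. Sketch (illustrative helper):
def _ref_bias_gt_mask(conv_out, bias):
    return (conv_out + bias.view(1, -1, 1, 1)) > 0   # gt_11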
# kernel path: runs/run_shard_4/inductor_cache/r4/cr4hpkpezpllmtrycjdjyfyalsg3igxkpp5ddup6ueansg3uhioj.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_16 => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_12 = async_compile.triton('triton_poi_fused__to_copy_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
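# NOTE (editor): this kernel precomputes the low gather index for
# align_corners=False bilinear upsampling at scale factor 2: each output
# coordinate d maps to source coordinate max((d + 0.5) * 0.5 - 0.5, 0),
# and the low tap is its floor. A plain-Python reference (illustrative):
def _ref_upsample_low_index(d, scale=0.5):
    src = max((d + 0.5) * scale - 0.5, 0.0)   # tmp3 .. tmp7
    return int(src)                           # tmp8: trunc == floor since src >= 0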
# kernel path: runs/run_shard_4/inductor_cache/it/citeiab2byvsltguyuzd2s2joq6e6z355s7h7bam6hgio5s5cret.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_16 => add_1, clamp_max
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_1, 1), kwargs = {})
triton_poi_fused_add_clamp_13 = async_compile.triton('triton_poi_fused_add_clamp_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = triton_helpers.minimum(tmp10, tmp9)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
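# NOTE (editor): the high gather index is the low index plus one, clamped
# to the last valid input row/column; here the input spatial size is 2, so
# the clamp bound is 1. Reference (illustrative helper):
def _ref_upsample_high_index(low_idx, in_size):
    return min(low_idx + 1, in_size - 1)      # add_1 then clamp_max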
# kernel path: runs/run_shard_4/inductor_cache/tq/ctqegi24pxetbae63246pykqjalfftx6xr5vt4fhudr7ehpmpbyv.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_16 => add, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul_12, sub, sub_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_12, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
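# NOTE (editor): the interpolation weight along each axis is the
# fractional part of the source coordinate, clamped into [0, 1]. Reference
# (illustrative helper, matching the index math of the two kernels above):
def _ref_upsample_weight(d, scale=0.5):
    src = max((d + 0.5) * scale - 0.5, 0.0)
    frac = src - int(src)                     # sub_2: src minus its low index
    return min(max(frac, 0.0), 1.0)           # clamp into [0, 1]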
# kernel path: runs/run_shard_4/inductor_cache/ar/carrwnezxoitom5qyzrwvxxe2xsarer32dfybfdk3w4gttj5i277.py
# Topologically Sorted Source Nodes: [conv2d_11, x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_11 => convolution_11
# x_15 => mul_11, where_11
# x_16 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_4, add_5, add_6, mul_14, mul_15, mul_16, sub_3, sub_4, sub_6
# Graph fragment:
# %convolution_11 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_11, 0.1), kwargs = {})
# %where_11 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_11, %convolution_11, %mul_11), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_14), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_15), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_16), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4) % 4
x0 = xindex % 4
x6 = (xindex // 16)
x2 = (xindex // 16) % 512
x4 = xindex
# Precomputed gather indices (in_ptr0/1: low, in_ptr5/6: high) and lerp
# weights (in_ptr7: x, in_ptr8: y) from the small index kernels above;
# in_ptr4 is the per-channel bias.
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
# Wrap negative indices into [0, 2) (Python-style indexing guard).
tmp1 = tl.full([XBLOCK], 2, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
# Tap (row tmp4, col tmp8): rematerialize LeakyReLU from the saved mask
# (in_ptr2), the raw conv output (in_ptr3) and the bias.
tmp9 = tl.load(in_ptr2 + (tmp8 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
# Tap (row tmp19, col tmp8).
tmp20 = tl.load(in_ptr2 + (tmp8 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
# Tap (row tmp19, col tmp28), then lerp along x with weight tmp35.
tmp29 = tl.load(in_ptr2 + (tmp28 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
# Tap (row tmp4, col tmp28), then lerp along x for the other row index.
tmp38 = tl.load(in_ptr2 + (tmp28 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
# Blend the two row results along y with weight tmp47 and store.
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
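# NOTE (editor): the fused kernel above gathers the four bilinear taps of
# the 2x2 feature map, rematerializes LeakyReLU at each tap from the saved
# mask plus the raw conv output and bias, then performs the separable
# blend: one lerp along x per row index, then one lerp along y. A compact
# sketch of the blend given four taps and weights wx, wy (illustrative):
def _ref_bilinear_blend(v00, v01, v10, v11, wx, wy):
    row0 = v00 + (v01 - v00) * wx     # lerp along x at the first row index
    row1 = v10 + (v11 - v10) * wx     # lerp along x at the second row index
    return row0 + (row1 - row0) * wy  # blend the two rows along y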
# kernel path: runs/run_shard_4/inductor_cache/vm/cvmoqavquuan3erpml2tllmmlw2pfct5mokbplbbjboljuyvw7db.py
# Topologically Sorted Source Nodes: [conv2d_12, x_17], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_12 => convolution_12
# x_17 => gt_12
# Graph fragment:
# %convolution_12 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_6, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_12 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_12, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_16 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/zq/czqy62awvhrtl5r6fvvk4ufd5wffutbs7uz3a6rvpxyaj5tosmne.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_12, %where_9], 1), kwargs = {})
triton_poi_fused_cat_17 = async_compile.triton('triton_poi_fused_cat_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
# x1 = output channel (0..1023), x0 = spatial offset, x2 = batch index.
x1 = (xindex // 16) % 1024
x0 = xindex % 16
x2 = (xindex // 16384)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
# First operand (channels 0..511): apply bias add + LeakyReLU on the fly.
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (8192*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (16*x1) + (8192*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
# Second operand (channels 512..1023): straight copy of the skip tensor.
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 1024, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + (x0 + (16*((-512) + x1)) + (8192*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
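# NOTE (editor): this kernel materializes torch.cat([a, b], dim=1) in a
# single pass, fusing the bias add + LeakyReLU of the first operand into
# the copy: output channels below 512 come from the conv branch (mask,
# conv output, bias), channels at or above 512 are copied from the skip
# tensor. Eager-mode reference (illustrative helper):
def _ref_fused_cat(conv_out, bias, mask, skip):
    y = conv_out + bias.view(1, -1, 1, 1)
    a = torch.where(mask, y, 0.1 * y)      # where_12
    return torch.cat([a, skip], 1)         # cat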
# kernel path: runs/run_shard_4/inductor_cache/6w/c6wr4loz4rxs56x2er2z7yyokvbxpzsmffbuifvku2xqaerh75p3.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_19 => convert_element_type_5
# Graph fragment:
# %convert_element_type_5 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {})
triton_poi_fused__to_copy_18 = async_compile.triton('triton_poi_fused__to_copy_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_18(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/oq/coq7wg2jvtpm4oc4zm4dvbkwpc5jinzscvm4jrouu3hlob5nd7rs.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_19 => add_8, clamp_max_4
# Graph fragment:
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {})
# %clamp_max_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_8, 3), kwargs = {})
triton_poi_fused_add_clamp_19 = async_compile.triton('triton_poi_fused_add_clamp_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_19(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rh/crhh5ib7z3hkisro2vncldz577bevkgu7k3u5nnclrqmgo3wnuzx.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_19 => add_7, clamp_max_6, clamp_min_4, clamp_min_6, convert_element_type_4, iota_2, mul_19, sub_7, sub_9
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_2, torch.float32), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.5), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, 0.5), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_19, 0.5), kwargs = {})
# %clamp_min_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_7, 0.0), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_4, %convert_element_type_7), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_9, 0.0), kwargs = {})
# %clamp_max_6 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qk/cqkzmxtnx34qktmz7vfilm2bzsfk2r5cxo3wx6pwurql3crkpejs.py
# Topologically Sorted Source Nodes: [conv2d_13, x_18, x_19], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_13 => convolution_13
# x_18 => mul_18, where_13
# x_19 => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_11, add_12, add_13, mul_21, mul_22, mul_23, sub_10, sub_11, sub_13
# Graph fragment:
# %convolution_13 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_13, 0.1), kwargs = {})
# %where_13 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_13, %convolution_13, %mul_18), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %clamp_max_6), kwargs = {})
# %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_21), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_6), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_22), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_12, %add_11), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %clamp_max_7), kwargs = {})
# %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %mul_23), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 8) % 8
x0 = xindex % 8
x6 = (xindex // 64)
x2 = (xindex // 64) % 512
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vn/cvntemv5weoouv65lvun2muyb6apyj7dkrnebouaithxvdyd4hl4.py
# Topologically Sorted Source Nodes: [conv2d_14, x_20], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_14 => convolution_14
# x_20 => gt_14
# Graph fragment:
# %convolution_14 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_13, %primals_30, %primals_31, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_14 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_14, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_22 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
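
# triton_poi_fused_convolution_leaky_relu_22 above only materializes the
# boolean mask gt(conv + bias, 0); downstream kernels re-add the bias and use
# the stored mask to select between x and 0.1*x. A tiny sketch of that
# two-step decomposition (illustrative helper, assuming float32 inputs):
def _reference_leaky_relu_via_mask(conv_out, bias):
    import torch
    x = conv_out + bias.view(1, -1, 1, 1)
    mask = x > 0                           # what this kernel stores (*i1)
    return torch.where(mask, x, 0.1 * x)   # what the consumer kernels rebuild
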
# kernel path: runs/run_shard_4/inductor_cache/3p/c3pazcbmhoubkrcj7s65glics5kpj5vv7x2zlnvymydp46fxyf2m.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_14, %where_7], 1), kwargs = {})
triton_poi_fused_cat_23 = async_compile.triton('triton_poi_fused_cat_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 512
x0 = xindex % 64
x2 = (xindex // 32768)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (16384*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (64*x1) + (16384*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 512, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + (x0 + (64*((-256) + x1)) + (16384*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
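
# triton_poi_fused_cat_23 above folds the LeakyReLU reconstruction of the
# first branch into the channel concatenation, so no intermediate activation
# tensor is ever written. An equivalent eager-mode computation (hypothetical
# sketch; 256 + 256 = 512 channels for this instance):
def _reference_fused_cat(mask, conv_out, bias, skip):
    import torch
    x = conv_out + bias.view(1, -1, 1, 1)
    first = torch.where(mask, x, 0.1 * x)   # reconstructed LeakyReLU branch
    return torch.cat([first, skip], dim=1)  # skip branch copied through as-is
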
# kernel path: runs/run_shard_4/inductor_cache/mi/cmiwjqieuspwn256jnrugfvht2dt7ofln2psibayqc3twrtpkngi.py
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_22 => convert_element_type_9
# Graph fragment:
# %convert_element_type_9 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_4, torch.int64), kwargs = {})
triton_poi_fused__to_copy_24 = async_compile.triton('triton_poi_fused__to_copy_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_24(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
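
# triton_poi_fused__to_copy_24 above precomputes the top/left source index of
# a 2x bilinear upsample with align_corners=False:
#   src = max((dst + 0.5) * 0.5 - 0.5, 0);  idx0 = trunc(src)
# (trunc == floor here because src is non-negative). A plain-Python sketch of
# the same 16-entry lookup table (assumed 16 -> from-8 grid; name illustrative):
def _reference_source_index(dst_size=16, scale=0.5):
    return [int(max((i + 0.5) * scale - 0.5, 0.0)) for i in range(dst_size)]
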
# kernel path: runs/run_shard_4/inductor_cache/ki/ckith5u474vepvwaijseaqbn665u5jpg5stc4cj42bnzsgj6uexm.py
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_22 => add_15, clamp_max_8
# Graph fragment:
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_9, 1), kwargs = {})
# %clamp_max_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_15, 7), kwargs = {})
triton_poi_fused_add_clamp_25 = async_compile.triton('triton_poi_fused_add_clamp_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_25(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 7, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
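
# triton_poi_fused_add_clamp_25 above derives the bottom/right neighbor index
# by clamping idx0 + 1 to the last valid source row/column (7 for a source
# size of 8), so border pixels blend with themselves. Illustrative sketch
# (hypothetical helper, same index math as the kernel):
def _reference_neighbor_index(dst_size=16, src_size=8):
    idx0 = [int(max((i + 0.5) * 0.5 - 0.5, 0.0)) for i in range(dst_size)]
    return [min(i + 1, src_size - 1) for i in idx0]
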
# kernel path: runs/run_shard_4/inductor_cache/lu/cluyprd6omil4csahwtbdldnx2kt7j7znt35dzjdzj4xcxjsppaa.py
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_22 => add_14, clamp_max_10, clamp_min_10, clamp_min_8, convert_element_type_8, iota_4, mul_26, sub_14, sub_16
# Graph fragment:
# %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_4, torch.float32), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.5), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_14, 0.5), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_26, 0.5), kwargs = {})
# %clamp_min_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_14, 0.0), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_8, %convert_element_type_11), kwargs = {})
# %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_16, 0.0), kwargs = {})
# %clamp_max_10 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_10, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
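
# triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26 above produces the
# fractional interpolation weight, lambda = clamp(src - trunc(src), 0, 1),
# consumed by the fused upsample kernel below as its per-row/per-column
# blending factor. Sketch under the same assumptions (dst size 16, 2x scale):
def _reference_lerp_weight(dst_size=16, scale=0.5):
    weights = []
    for i in range(dst_size):
        src = max((i + 0.5) * scale - 0.5, 0.0)
        weights.append(min(max(src - int(src), 0.0), 1.0))
    return weights
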
# kernel path: runs/run_shard_4/inductor_cache/qc/cqcxmomsd2pfozktj753ije5uupmwdotvhhglolxtdikeyegh5yz.py
# Topologically Sorted Source Nodes: [conv2d_15, x_21, x_22], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_15 => convolution_15
# x_21 => mul_25, where_15
# x_22 => _unsafe_index_10, _unsafe_index_11, _unsafe_index_8, _unsafe_index_9, add_18, add_19, add_20, mul_28, mul_29, mul_30, sub_17, sub_18, sub_20
# Graph fragment:
# %convolution_15 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_32, %primals_33, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_15, 0.1), kwargs = {})
# %where_15 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_15, %convolution_15, %mul_25), kwargs = {})
# %_unsafe_index_8 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %convert_element_type_9, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %convert_element_type_9, %clamp_max_9]), kwargs = {})
# %_unsafe_index_10 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %clamp_max_8, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %clamp_max_8, %clamp_max_9]), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_9, %_unsafe_index_8), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %clamp_max_10), kwargs = {})
# %add_18 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_8, %mul_28), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_11, %_unsafe_index_10), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %clamp_max_10), kwargs = {})
# %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_10, %mul_29), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_19, %add_18), kwargs = {})
# %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %clamp_max_11), kwargs = {})
# %add_20 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_18, %mul_30), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x6 = (xindex // 256)
x2 = (xindex // 256) % 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
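
# The fused kernel above evaluates the separable bilinear blend
#   row_a = va0 + (va1 - va0) * wx
#   row_b = vb0 + (vb1 - vb0) * wx
#   out   = row_a + (row_b - row_a) * wy
# after reconstructing each corner value as leaky_relu(conv + bias, 0.1).
# Minimal sketch of the blend itself (corner tensors and weights assumed
# precomputed; names are illustrative, not from the generated graph):
def _reference_bilinear_blend(va0, va1, vb0, vb1, wx, wy):
    row_a = va0 + (va1 - va0) * wx   # lerp along x at the first source row
    row_b = vb0 + (vb1 - vb0) * wx   # lerp along x at the second source row
    return row_a + (row_b - row_a) * wy  # final lerp along y
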
# kernel path: runs/run_shard_4/inductor_cache/2n/c2ncutq26bahxlgkdh4rlnvyr47bwimc4zzutvjxr5n6y6efndwb.py
# Topologically Sorted Source Nodes: [conv2d_16, x_23], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_16 => convolution_16
# x_23 => gt_16
# Graph fragment:
# %convolution_16 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_20, %primals_34, %primals_35, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_16 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_16, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_28 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_28(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/oe/coeffqdqijre65ihx7pvd5fl3wkvhaztzhmg43n5sxftyfhfnesu.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_16, %where_5], 1), kwargs = {})
triton_poi_fused_cat_29 = async_compile.triton('triton_poi_fused_cat_29', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 256) % 256
x0 = xindex % 256
x2 = (xindex // 65536)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (256*x1) + (32768*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (256*x1) + (32768*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 256, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + (x0 + (256*((-128) + x1)) + (32768*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kk/ckknliedqrn55tdjnurtw2wmbhy4m7nftlest3rkcxytrug6sjjb.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_25 => convert_element_type_13
# Graph fragment:
# %convert_element_type_13 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_6, torch.int64), kwargs = {})
triton_poi_fused__to_copy_30 = async_compile.triton('triton_poi_fused__to_copy_30', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_30', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_30(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yi/cyid3ait3xdgmowp4yeresvz4pwdwiylxhglyhgg7hauo5drkgwr.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_25 => add_22, clamp_max_12
# Graph fragment:
# %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_13, 1), kwargs = {})
# %clamp_max_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_22, 15), kwargs = {})
triton_poi_fused_add_clamp_31 = async_compile.triton('triton_poi_fused_add_clamp_31', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_31', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_31(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 15, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2b/c2bhdwgkdtsx66umnkggdw7khh2c5wl4ogab34mcyesmsvh7u75z.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_25 => add_21, clamp_max_14, clamp_min_12, clamp_min_14, convert_element_type_12, iota_6, mul_33, sub_21, sub_23
# Graph fragment:
# %iota_6 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_12 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_6, torch.float32), kwargs = {})
# %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_12, 0.5), kwargs = {})
# %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_21, 0.5), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_33, 0.5), kwargs = {})
# %clamp_min_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_21, 0.0), kwargs = {})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_12, %convert_element_type_15), kwargs = {})
# %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_23, 0.0), kwargs = {})
# %clamp_max_14 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_14, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/74/c74xhgb6wezra6e6cyzbt6yotgcl7i7n4p3es6nogdaczlcdlrl4.py
# Topologically Sorted Source Nodes: [conv2d_17, x_24, x_25], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_17 => convolution_17
# x_24 => mul_32, where_17
# x_25 => _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, add_25, add_26, add_27, mul_35, mul_36, mul_37, sub_24, sub_25, sub_27
# Graph fragment:
# %convolution_17 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_36, %primals_37, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_17, 0.1), kwargs = {})
# %where_17 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_17, %convolution_17, %mul_32), kwargs = {})
# %_unsafe_index_12 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %convert_element_type_13, %convert_element_type_15]), kwargs = {})
# %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %convert_element_type_13, %clamp_max_13]), kwargs = {})
# %_unsafe_index_14 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %clamp_max_12, %convert_element_type_15]), kwargs = {})
# %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %clamp_max_12, %clamp_max_13]), kwargs = {})
# %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_13, %_unsafe_index_12), kwargs = {})
# %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %clamp_max_14), kwargs = {})
# %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_12, %mul_35), kwargs = {})
# %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_15, %_unsafe_index_14), kwargs = {})
# %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_25, %clamp_max_14), kwargs = {})
# %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_14, %mul_36), kwargs = {})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_26, %add_25), kwargs = {})
# %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %clamp_max_15), kwargs = {})
# %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_25, %mul_37), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 32
x0 = xindex % 32
x6 = (xindex // 1024)
x2 = (xindex // 1024) % 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hu/chuh444ehiqujq3pobg3s6kf4jk3jfs66ff4yxzuqyv7z7gvdw4l.py
# Topologically Sorted Source Nodes: [conv2d_18, x_26], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_18 => convolution_18
# x_26 => gt_18
# Graph fragment:
# %convolution_18 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_27, %primals_38, %primals_39, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_18 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_18, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_34 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_34', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_34', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_34(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/wo/cwov5xjz2rgypru6odo5shttzkvjzbv2j5h765xadmngxefsg27w.py
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_3 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_18, %where_3], 1), kwargs = {})
triton_poi_fused_cat_35 = async_compile.triton('triton_poi_fused_cat_35', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_35', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 1024) % 128
x0 = xindex % 1024
x2 = (xindex // 131072)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (1024*x1) + (65536*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (1024*x1) + (65536*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 128, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + (x0 + (1024*((-64) + x1)) + (65536*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dy/cdyi77rlf5lxwibfyd5p2m432vdqftcfdpn6tuqyv7hyajbxkkvo.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_28 => convert_element_type_17
# Graph fragment:
# %convert_element_type_17 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_8, torch.int64), kwargs = {})
triton_poi_fused__to_copy_36 = async_compile.triton('triton_poi_fused__to_copy_36', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_36', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_36(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/l5/cl5xfazaijdiktz5n5gqb2xvmg6f5wpggcjdlnx676ib5wqnt2bo.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_28 => add_29, clamp_max_16
# Graph fragment:
# %add_29 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_17, 1), kwargs = {})
# %clamp_max_16 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_29, 31), kwargs = {})
triton_poi_fused_add_clamp_37 = async_compile.triton('triton_poi_fused_add_clamp_37', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_37', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_37(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 31, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/zw/czwrbkjbdq3qzzeebsz3vxkktcfyv5fp5csjdanc2n4yhokgkzxs.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_28 => add_28, clamp_max_18, clamp_min_16, clamp_min_18, convert_element_type_16, iota_8, mul_40, sub_28, sub_30
# Graph fragment:
# %iota_8 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_16 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_8, torch.float32), kwargs = {})
# %add_28 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_16, 0.5), kwargs = {})
# %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_28, 0.5), kwargs = {})
# %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_40, 0.5), kwargs = {})
# %clamp_min_16 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_28, 0.0), kwargs = {})
# %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_16, %convert_element_type_19), kwargs = {})
# %clamp_min_18 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_30, 0.0), kwargs = {})
# %clamp_max_18 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_18, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/cj/ccjteqzvlszshm7xogphjn6lezrnhtguz6t2ft3y2qrajrcko2wk.py
# Topologically Sorted Source Nodes: [conv2d_19, x_27, x_28], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_19 => convolution_19
# x_27 => mul_39, where_19
# x_28 => _unsafe_index_16, _unsafe_index_17, _unsafe_index_18, _unsafe_index_19, add_32, add_33, add_34, mul_42, mul_43, mul_44, sub_31, sub_32, sub_34
# Graph fragment:
# %convolution_19 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_40, %primals_41, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_19, 0.1), kwargs = {})
# %where_19 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_19, %convolution_19, %mul_39), kwargs = {})
# %_unsafe_index_16 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %convert_element_type_17, %convert_element_type_19]), kwargs = {})
# %_unsafe_index_17 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %convert_element_type_17, %clamp_max_17]), kwargs = {})
# %_unsafe_index_18 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %clamp_max_16, %convert_element_type_19]), kwargs = {})
# %_unsafe_index_19 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %clamp_max_16, %clamp_max_17]), kwargs = {})
# %sub_31 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_17, %_unsafe_index_16), kwargs = {})
# %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_31, %clamp_max_18), kwargs = {})
# %add_32 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_16, %mul_42), kwargs = {})
# %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_19, %_unsafe_index_18), kwargs = {})
# %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_32, %clamp_max_18), kwargs = {})
# %add_33 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_18, %mul_43), kwargs = {})
# %sub_34 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_33, %add_32), kwargs = {})
# %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_34, %clamp_max_19), kwargs = {})
# %add_34 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_32, %mul_44), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x6 = (xindex // 4096)
x2 = (xindex // 4096) % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
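# A hedged high-level equivalent of the fused kernel above: leaky_relu(0.1) on
# the biased conv output, then a 2x bilinear upsample that gathers the four
# neighbours and lerps horizontally, then vertically. Helper name illustrative.
def _example_leaky_relu_upsample(y):
    a = torch.nn.functional.leaky_relu(y, negative_slope=0.1)
    return torch.nn.functional.interpolate(a, scale_factor=2, mode='bilinear',
        align_corners=False)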
# kernel path: runs/run_shard_4/inductor_cache/vz/cvzt4swwgye3zpxep4ligqcdlu5xxf7fecsvlec4qsz3qtn6tkxy.py
# Topologically Sorted Source Nodes: [conv2d_20, x_29], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_20 => convolution_20
# x_29 => gt_20
# Graph fragment:
# %convolution_20 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_34, %primals_42, %primals_43, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_20 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_20, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_40 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_40', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_40', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_40(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
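# Note this kernel stores only the sign mask of (conv output + bias); the
# activated values are rematerialized inside the next fused kernel. A minimal
# sketch, with an illustrative helper name:
def _example_leaky_relu_mask(y, bias):
    return (y + bias.view(1, -1, 1, 1)) > 0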
# kernel path: runs/run_shard_4/inductor_cache/rm/crmdmqyxav4fb4725fm2hhwf6m4yrxuhqmo2dbcjkiu6gomd2akp.py
# Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_4 => cat_4
# Graph fragment:
# %cat_4 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_20, %where_1], 1), kwargs = {})
triton_poi_fused_cat_41 = async_compile.triton('triton_poi_fused_cat_41', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_41', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_41(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 64
x0 = xindex % 4096
x2 = (xindex // 262144)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 64, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + (x0 + (4096*((-32) + x1)) + (131072*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
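# A high-level sketch of the cat kernel above, assuming the first operand is
# the leaky_relu(0.1)-activated conv output and the second is the
# full-resolution skip tensor. Helper name illustrative.
def _example_cat_skip(conv_out, skip):
    x = torch.nn.functional.leaky_relu(conv_out, negative_slope=0.1)
    return torch.cat((x, skip), dim=1)  # channels: 32 + 32 -> 64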
# kernel path: runs/run_shard_4/inductor_cache/p7/cp74qdkmdadhqce4dzechhvpihfuica6bbejv65ptme5otg3jhj3.py
# Topologically Sorted Source Nodes: [conv2d_22, x_31], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_22 => convolution_22
# x_31 => gt_22, mul_47, where_22
# Graph fragment:
# %convolution_22 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_21, %primals_46, %primals_47, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_22 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_22, 0), kwargs = {})
# %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_22, 0.1), kwargs = {})
# %where_22 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_22, %convolution_22, %mul_47), kwargs = {})
triton_poi_fused_convolution_leaky_relu_42 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_42', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_42', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_42(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 4
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
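# A sketch of the fused epilogue above: add the conv bias, keep the sign mask
# (saved for the backward pass), and emit the leaky_relu(0.1) activation.
# Helper name illustrative.
def _example_bias_leaky_relu(y, bias):
    z = y + bias.view(1, -1, 1, 1)
    mask = z > 0
    return mask, torch.where(mask, z, 0.1 * z)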
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 7, 7), (1568, 49, 7, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128, ), (1, ))
assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256, ), (1, ))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512, ), (1, ))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512, ), (1, ))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512, ), (1, ))
assert_size_stride(primals_28, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_29, (512, ), (1, ))
assert_size_stride(primals_30, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_31, (256, ), (1, ))
assert_size_stride(primals_32, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_33, (256, ), (1, ))
assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_35, (128, ), (1, ))
assert_size_stride(primals_36, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_37, (128, ), (1, ))
assert_size_stride(primals_38, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (64, ), (1, ))
assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (64, ), (1, ))
assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_43, (32, ), (1, ))
assert_size_stride(primals_44, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_45, (32, ), (1, ))
assert_size_stride(primals_46, (4, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_47, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 524288, grid=grid(524288), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf5 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, s1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 524288, grid=grid(524288), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_1.run(buf5, buf6, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf7, primals_7, buf8, buf9, 262144, grid=grid(262144), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf11 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
buf12 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf10, primals_9, buf11, buf12, 262144, grid=grid(262144), stream=stream0)
del primals_9
buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_3.run(buf12, buf13, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 128, 16, 16), (32768, 256, 16, 1))
buf15 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
buf16 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_4.run(buf14, primals_11, buf15, buf16, 131072, grid=grid(131072), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 128, 16, 16), (32768, 256, 16, 1))
buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
buf19 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, x_6], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_4.run(buf17, primals_13, buf18, buf19, 131072, grid=grid(131072), stream=stream0)
del primals_13
buf20 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_5.run(buf19, buf20, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf20, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 256, 8, 8), (16384, 64, 8, 1))
buf22 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
buf23 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_6, x_8], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_6.run(buf21, primals_15, buf22, buf23, 65536, grid=grid(65536), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1))
buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
buf26 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [conv2d_7, x_9], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_6.run(buf24, primals_17, buf25, buf26, 65536, grid=grid(65536), stream=stream0)
del primals_17
buf27 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_7.run(buf26, buf27, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 512, 4, 4), (8192, 16, 4, 1))
buf29 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool)
buf30 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_8, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_8.run(buf28, primals_19, buf29, buf30, 32768, grid=grid(32768), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf31 = extern_kernels.convolution(buf30, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 512, 4, 4), (8192, 16, 4, 1))
buf32 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool)
buf33 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [conv2d_9, x_12], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_8.run(buf31, primals_21, buf32, buf33, 32768, grid=grid(32768), stream=stream0)
del primals_21
buf34 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_9.run(buf33, buf34, 8192, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf35 = extern_kernels.convolution(buf34, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 512, 2, 2), (2048, 4, 2, 1))
buf36 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool)
buf37 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_10, x_14], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_10.run(buf35, primals_23, buf36, buf37, 8192, grid=grid(8192), stream=stream0)
del buf35
del primals_23
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 2, 2), (2048, 4, 2, 1))
buf39 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_11, x_15], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_11.run(buf38, primals_25, buf39, 8192, grid=grid(8192), stream=stream0)
buf40 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_12.run(buf40, 4, grid=grid(4), stream=stream0)
buf41 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_13.run(buf41, 4, grid=grid(4), stream=stream0)
buf42 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_12.run(buf42, 4, grid=grid(4), stream=stream0)
buf43 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_13.run(buf43, 4, grid=grid(4), stream=stream0)
buf46 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14.run(buf46, 4, grid=grid(4), stream=stream0)
buf48 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14.run(buf48, 4, grid=grid(4), stream=stream0)
buf45 = buf31; del buf31 # reuse
buf49 = buf45; del buf45 # reuse
buf50 = buf49; del buf49 # reuse
# Topologically Sorted Source Nodes: [conv2d_11, x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15.run(buf50, buf41, buf42, buf39, buf38, primals_25, buf40, buf43, buf46, buf48, 32768, grid=grid(32768), stream=stream0)
del buf38
del primals_25
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf51 = extern_kernels.convolution(buf50, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 512, 4, 4), (8192, 16, 4, 1))
buf52 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_12, x_17], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_16.run(buf51, primals_27, buf52, 32768, grid=grid(32768), stream=stream0)
buf53 = reinterpret_tensor(buf24, (4, 1024, 4, 4), (16384, 16, 4, 1), 0); del buf24 # reuse
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_17.run(buf52, buf51, primals_27, buf33, buf53, 65536, grid=grid(65536), stream=stream0)
del buf51
del primals_27
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
buf54 = extern_kernels.convolution(buf53, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 512, 4, 4), (8192, 16, 4, 1))
buf55 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_13, x_18], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_16.run(buf54, primals_29, buf55, 32768, grid=grid(32768), stream=stream0)
buf56 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_18.run(buf56, 8, grid=grid(8), stream=stream0)
buf57 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_19.run(buf57, 8, grid=grid(8), stream=stream0)
buf58 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_18.run(buf58, 8, grid=grid(8), stream=stream0)
buf59 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_19.run(buf59, 8, grid=grid(8), stream=stream0)
buf62 = empty_strided_cuda((8, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20.run(buf62, 8, grid=grid(8), stream=stream0)
buf64 = empty_strided_cuda((8, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20.run(buf64, 8, grid=grid(8), stream=stream0)
buf61 = reinterpret_tensor(buf17, (4, 512, 8, 8), (32768, 64, 8, 1), 0); del buf17 # reuse
buf65 = buf61; del buf61 # reuse
buf66 = buf65; del buf65 # reuse
# Topologically Sorted Source Nodes: [conv2d_13, x_18, x_19], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21.run(buf66, buf57, buf58, buf55, buf54, primals_29, buf56, buf59, buf62, buf64, 131072, grid=grid(131072), stream=stream0)
del buf54
del primals_29
# Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
buf67 = extern_kernels.convolution(buf66, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf67, (4, 256, 8, 8), (16384, 64, 8, 1))
buf68 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_14, x_20], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_22.run(buf67, primals_31, buf68, 65536, grid=grid(65536), stream=stream0)
buf69 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_23.run(buf68, buf67, primals_31, buf26, buf69, 131072, grid=grid(131072), stream=stream0)
del buf67
del primals_31
# Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution]
buf70 = extern_kernels.convolution(buf69, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 256, 8, 8), (16384, 64, 8, 1))
buf71 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_15, x_21], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_22.run(buf70, primals_33, buf71, 65536, grid=grid(65536), stream=stream0)
buf72 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_24.run(buf72, 16, grid=grid(16), stream=stream0)
buf73 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_25.run(buf73, 16, grid=grid(16), stream=stream0)
buf74 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_24.run(buf74, 16, grid=grid(16), stream=stream0)
buf75 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_25.run(buf75, 16, grid=grid(16), stream=stream0)
buf78 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26.run(buf78, 16, grid=grid(16), stream=stream0)
buf80 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26.run(buf80, 16, grid=grid(16), stream=stream0)
buf77 = reinterpret_tensor(buf10, (4, 256, 16, 16), (65536, 256, 16, 1), 0); del buf10 # reuse
buf81 = buf77; del buf77 # reuse
buf82 = buf81; del buf81 # reuse
# Topologically Sorted Source Nodes: [conv2d_15, x_21, x_22], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27.run(buf82, buf73, buf74, buf71, buf70, primals_33, buf72, buf75, buf78, buf80, 262144, grid=grid(262144), stream=stream0)
del primals_33
# Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution]
buf83 = extern_kernels.convolution(buf82, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1))
buf84 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_16, x_23], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_28.run(buf83, primals_35, buf84, 131072, grid=grid(131072), stream=stream0)
buf85 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_29.run(buf84, buf83, primals_35, buf19, buf85, 262144, grid=grid(262144), stream=stream0)
del buf83
del primals_35
# Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution]
buf86 = extern_kernels.convolution(buf85, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 128, 16, 16), (32768, 256, 16, 1))
buf87 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_17, x_24], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_28.run(buf86, primals_37, buf87, 131072, grid=grid(131072), stream=stream0)
buf88 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_30.run(buf88, 32, grid=grid(32), stream=stream0)
buf89 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_31.run(buf89, 32, grid=grid(32), stream=stream0)
buf90 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_30.run(buf90, 32, grid=grid(32), stream=stream0)
buf91 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_31.run(buf91, 32, grid=grid(32), stream=stream0)
buf94 = empty_strided_cuda((32, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32.run(buf94, 32, grid=grid(32), stream=stream0)
buf96 = empty_strided_cuda((32, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32.run(buf96, 32, grid=grid(32), stream=stream0)
buf93 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1), 0); del buf3 # reuse
buf97 = buf93; del buf93 # reuse
buf98 = buf97; del buf97 # reuse
# Topologically Sorted Source Nodes: [conv2d_17, x_24, x_25], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33.run(buf98, buf89, buf90, buf87, buf86, primals_37, buf88, buf91, buf94, buf96, 524288, grid=grid(524288), stream=stream0)
del buf86
del primals_37
# Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution]
buf99 = extern_kernels.convolution(buf98, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf100 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_18, x_26], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_34.run(buf99, primals_39, buf100, 262144, grid=grid(262144), stream=stream0)
buf101 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
triton_poi_fused_cat_35.run(buf100, buf99, primals_39, buf12, buf101, 524288, grid=grid(524288), stream=stream0)
del buf99
del primals_39
# Topologically Sorted Source Nodes: [conv2d_19], Original ATen: [aten.convolution]
buf102 = extern_kernels.convolution(buf101, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf102, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf103 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_19, x_27], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_34.run(buf102, primals_41, buf103, 262144, grid=grid(262144), stream=stream0)
buf104 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_36.run(buf104, 64, grid=grid(64), stream=stream0)
buf105 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_37.run(buf105, 64, grid=grid(64), stream=stream0)
buf106 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_36.run(buf106, 64, grid=grid(64), stream=stream0)
buf107 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_37.run(buf107, 64, grid=grid(64), stream=stream0)
buf110 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38.run(buf110, 64, grid=grid(64), stream=stream0)
buf112 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38.run(buf112, 64, grid=grid(64), stream=stream0)
buf109 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
buf113 = buf109; del buf109 # reuse
buf114 = buf113; del buf113 # reuse
# Topologically Sorted Source Nodes: [conv2d_19, x_27, x_28], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39.run(buf114, buf105, buf106, buf103, buf102, primals_41, buf104, buf107, buf110, buf112, 1048576, grid=grid(1048576), stream=stream0)
del buf102
del primals_41
# Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution]
buf115 = extern_kernels.convolution(buf114, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf116 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_20, x_29], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_40.run(buf115, primals_43, buf116, 524288, grid=grid(524288), stream=stream0)
buf117 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat]
triton_poi_fused_cat_41.run(buf116, buf115, primals_43, buf5, buf117, 1048576, grid=grid(1048576), stream=stream0)
del primals_43
# Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution]
buf118 = extern_kernels.convolution(buf117, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf119 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf120 = buf115; del buf115 # reuse
# Topologically Sorted Source Nodes: [conv2d_21, x_30], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf118, primals_45, buf119, buf120, 524288, grid=grid(524288), stream=stream0)
del buf118
del primals_45
# Topologically Sorted Source Nodes: [conv2d_22], Original ATen: [aten.convolution]
buf121 = extern_kernels.convolution(buf120, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf121, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf122 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.bool)
buf123 = reinterpret_tensor(buf70, (4, 4, 64, 64), (16384, 4096, 64, 1), 0); del buf70 # reuse
# Topologically Sorted Source Nodes: [conv2d_22, x_31], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_42.run(buf121, primals_47, buf122, buf123, 65536, grid=grid(65536), stream=stream0)
del buf121
del primals_47
return (buf123, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, buf1, buf2, buf4, buf5, buf6, buf8, buf9, buf11, buf12, buf13, buf15, buf16, buf18, buf19, buf20, buf22, buf23, buf25, buf26, buf27, buf29, buf30, buf32, buf33, buf34, buf36, buf37, buf39, buf40, buf41, buf42, buf43, buf46, buf48, buf50, buf52, buf53, buf55, buf56, buf57, buf58, buf59, buf62, buf64, buf66, buf68, buf69, buf71, buf72, buf73, buf74, buf75, buf78, buf80, buf82, buf84, buf85, buf87, buf88, buf89, buf90, buf91, buf94, buf96, buf98, buf100, buf101, buf103, buf104, buf105, buf106, buf107, buf110, buf112, buf114, buf116, buf117, buf119, buf120, buf122, )
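# A hedged reading of the return convention above: the first element is the
# network output, and the remaining tensors are weights and activations kept
# alive for a backward graph (an AOT-autograd convention). Unpacking sketch,
# helper name illustrative:
def _example_unpack(outputs):
    out, *saved_for_backward = outputs
    return out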
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4, 7, 7), (196, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 7, 7), (1568, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((512, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((4, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class down(nn.Module):
def __init__(self, inChannels, outChannels, filterSize):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used as input and output channels for the
second convolutional layer.
filterSize : int
            filter size for the convolution filter. Input N creates
            an N x N filter.
"""
super(down, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))
        self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))
def forward(self, x):
""" Forward. """
x = F.avg_pool2d(x, 2)
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
return x
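# A minimal shape check for `down` (helper name illustrative): the avg_pool
# halves the spatial size before the two same-padding convolutions.
def _example_down_shapes():
    blk = down(inChannels=32, outChannels=64, filterSize=5)
    return blk(torch.rand(1, 32, 64, 64)).shape  # torch.Size([1, 64, 32, 32])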
class up(nn.Module):
def __init__(self, inChannels, outChannels):
super(up, self).__init__()
self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1,
padding=1)
def forward(self, x, skpCn):
""" Forward . """
x = F.interpolate(x, scale_factor=2, mode='bilinear')
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)),
negative_slope=0.1)
return x
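# conv2 takes 2 * outChannels because forward() concatenates the upsampled
# branch with the skip connection along channels. A minimal shape check,
# helper name illustrative:
def _example_up_shapes():
    blk = up(inChannels=64, outChannels=32)
    x, skip = torch.rand(1, 64, 16, 16), torch.rand(1, 32, 32, 32)
    return blk(x, skip).shape  # torch.Size([1, 32, 32, 32])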
class UNet(nn.Module):
def __init__(self, inChannels, outChannels):
super(UNet, self).__init__()
self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)
self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)
def forward(self, x):
""" Forward."""
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
s1 = F.leaky_relu(self.conv2(x), negative_slope=0.1)
s2 = self.down1(s1)
s3 = self.down2(s2)
s4 = self.down3(s3)
s5 = self.down4(s4)
x = self.down5(s5)
x = self.up1(x, s5)
x = self.up2(x, s4)
x = self.up3(x, s3)
x = self.up4(x, s2)
x = self.up5(x, s1)
x = F.leaky_relu(self.conv3(x), negative_slope=0.1)
return x
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'inChannels': 4, 'outChannels': 4}]
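# A usage sketch wiring up the shapes advertised by get_inputs() and
# get_init_inputs() above. Helper name illustrative.
def _example_unet_forward():
    net = UNet(inChannels=4, outChannels=4)
    return net(torch.rand(4, 4, 64, 64)).shape  # torch.Size([4, 4, 64, 64])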
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32
    x1 = xindex // 32
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
        eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
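# The kernel above is a stride-2, 2x2 average pool: each output element is
# 0.25 times the sum of a 2x2 input block. High-level equivalent, helper name
# illustrative:
def _example_avg_pool(x):
    return torch.nn.functional.avg_pool2d(x, kernel_size=2)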
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None,
        eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_10(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_11(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = triton_helpers.minimum(tmp10, tmp9)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
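# Fused decoder step: applies bias-add + LeakyReLU(0.1) to the conv output,
# then bilinearly upsamples 2x2 -> 4x4 by gathering the four neighbouring
# source pixels and blending them with the precomputed weights.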
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4 % 4
x0 = xindex % 4
x6 = xindex // 16
x2 = xindex // 16 % 512
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 2, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 2 * tmp19 + 4 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 2 * tmp19 + 4 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 2 * tmp19 + 4 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 2 * tmp19 + 4 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_16(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
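# Channel concatenation for the skip connection: channels [0, 512) hold the
# activated conv branch, channels [512, 1024) are copied from the skip tensor.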
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 1024
x0 = xindex % 16
x2 = xindex // 16384
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 8192 * x2), tmp4, other=0.0).to(tl
.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 16 * x1 + 8192 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 1024, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 16 * (-512 + x1) + 8192 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused__to_copy_18(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_19(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 8 % 8
x0 = xindex % 8
x6 = xindex // 64
x2 = xindex // 64 % 512
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 4 * tmp19 + 16 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 4 * tmp19 + 16 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 4 * tmp19 + 16 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 4 * tmp19 + 16 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_22(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 512
x0 = xindex % 64
x2 = xindex // 32768
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0).to(
tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 512, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 64 * (-256 + x1) + 16384 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused__to_copy_24(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_25(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 7, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x6 = xindex // 256
x2 = xindex // 256 % 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 8 * tmp19 + 64 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 8 * tmp19 + 64 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 8 * tmp19 + 64 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 8 * tmp19 + 64 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_28(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 256
x0 = xindex % 256
x2 = xindex // 65536
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0).to(
tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 256, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 256 * (-128 + x1) + 32768 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused__to_copy_30(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_31(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 15, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 32
x0 = xindex % 32
x6 = xindex // 1024
x2 = xindex // 1024 % 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 16 * tmp19 + 256 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 16 * tmp19 + 256 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 16 * tmp19 + 256 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 16 * tmp19 + 256 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_34(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 1024 % 128
x0 = xindex % 1024
x2 = xindex // 131072
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0
).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 1024 * (-64 + x1) + 65536 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused__to_copy_36(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_37(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 31, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x6 = xindex // 4096
x2 = xindex // 4096 % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 32 * tmp19 + 1024 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 32 * tmp19 + 1024 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 32 * tmp19 + 1024 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 32 * tmp19 + 1024 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_40(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_41(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 64
x0 = xindex % 4096
x2 = xindex // 262144
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0
).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_42(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 7, 7), (1568, 49, 7, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256,), (1,))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
assert_size_stride(primals_28, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_29, (512,), (1,))
assert_size_stride(primals_30, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_31, (256,), (1,))
assert_size_stride(primals_32, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_33, (256,), (1,))
assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_35, (128,), (1,))
assert_size_stride(primals_36, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_37, (128,), (1,))
assert_size_stride(primals_38, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (64,), (1,))
assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (64,), (1,))
assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_43, (32,), (1,))
assert_size_stride(primals_44, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_45, (32,), (1,))
assert_size_stride(primals_46, (4, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_47, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
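# Encoder head: two 7x7 convolutions at full 64x64 resolution, each fused
# with bias-add + LeakyReLU(0.1).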
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf0,
primals_2, buf1, buf2, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf5 = buf0
del buf0
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf3,
primals_5, buf4, buf5, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_5
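# down1: 2x2 average pool (64x64 -> 32x32) followed by two 5x5 convolutions
# (32 -> 64 channels).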
buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
triton_poi_fused_avg_pool2d_1[grid(131072)](buf5, buf6, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf7,
primals_7, buf8, buf9, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf11 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
buf12 = buf7
del buf7
triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf10,
primals_9, buf11, buf12, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_9
buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.float32)
triton_poi_fused_avg_pool2d_3[grid(65536)](buf12, buf13, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 128, 16, 16), (32768, 256, 16, 1))
buf15 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
buf16 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf14,
primals_11, buf15, buf16, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_11
buf17 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 128, 16, 16), (32768, 256, 16, 1))
buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
buf19 = buf14
del buf14
triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf17,
primals_13, buf18, buf19, 131072, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_13
buf20 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.
float32)
triton_poi_fused_avg_pool2d_5[grid(32768)](buf19, buf20, 32768,
XBLOCK=128, num_warps=4, num_stages=1)
buf21 = extern_kernels.convolution(buf20, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 256, 8, 8), (16384, 64, 8, 1))
buf22 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
buf23 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf21,
primals_15, buf22, buf23, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_15
buf24 = extern_kernels.convolution(buf23, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1))
buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
buf26 = buf21
del buf21
triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf24,
primals_17, buf25, buf26, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_17
buf27 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.
float32)
triton_poi_fused_avg_pool2d_7[grid(16384)](buf26, buf27, 16384,
XBLOCK=128, num_warps=4, num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 512, 4, 4), (8192, 16, 4, 1))
buf29 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
buf30 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf28,
primals_19, buf29, buf30, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_19
buf31 = extern_kernels.convolution(buf30, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 512, 4, 4), (8192, 16, 4, 1))
buf32 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
buf33 = buf28
del buf28
triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf31,
primals_21, buf32, buf33, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_21
buf34 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.
float32)
triton_poi_fused_avg_pool2d_9[grid(8192)](buf33, buf34, 8192,
XBLOCK=256, num_warps=4, num_stages=1)
buf35 = extern_kernels.convolution(buf34, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 512, 2, 2), (2048, 4, 2, 1))
buf36 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool)
buf37 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.
float32)
triton_poi_fused_convolution_leaky_relu_10[grid(8192)](buf35,
primals_23, buf36, buf37, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del buf35
del primals_23
buf38 = extern_kernels.convolution(buf37, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 2, 2), (2048, 4, 2, 1))
buf39 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_11[grid(8192)](buf38,
primals_25, buf39, 8192, XBLOCK=256, num_warps=4, num_stages=1)
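# Decoder (up1): build the bilinear index/weight tables for the 2x upsample
# (2x2 -> 4x4) of the bottleneck features.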
buf40 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_12[grid(4)](buf40, 4, XBLOCK=4, num_warps
=1, num_stages=1)
buf41 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_13[grid(4)](buf41, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf42 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused__to_copy_12[grid(4)](buf42, 4, XBLOCK=4, num_warps
=1, num_stages=1)
buf43 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_add_clamp_13[grid(4)](buf43, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf46 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(4)](buf46,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf48 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(4)](buf48,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf45 = buf31
del buf31
buf49 = buf45
del buf45
buf50 = buf49
del buf49
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15[
grid(32768)](buf50, buf41, buf42, buf39, buf38, primals_25,
buf40, buf43, buf46, buf48, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del buf38
del primals_25
buf51 = extern_kernels.convolution(buf50, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 512, 4, 4), (8192, 16, 4, 1))
buf52 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_leaky_relu_16[grid(32768)](buf51,
primals_27, buf52, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf53 = reinterpret_tensor(buf24, (4, 1024, 4, 4), (16384, 16, 4, 1), 0
)
del buf24
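# up1: concatenate the upsampled branch with the down4 skip connection
# (buf33), giving 1024 channels for the second conv.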
triton_poi_fused_cat_17[grid(65536)](buf52, buf51, primals_27,
buf33, buf53, 65536, XBLOCK=512, num_warps=4, num_stages=1)
del buf51
del primals_27
buf54 = extern_kernels.convolution(buf53, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 512, 4, 4), (8192, 16, 4, 1))
buf55 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_leaky_relu_16[grid(32768)](buf54,
primals_29, buf55, 32768, XBLOCK=256, num_warps=4, num_stages=1)
buf56 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_18[grid(8)](buf56, 8, XBLOCK=8, num_warps
=1, num_stages=1)
buf57 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_19[grid(8)](buf57, 8, XBLOCK=8,
num_warps=1, num_stages=1)
buf58 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_18[grid(8)](buf58, 8, XBLOCK=8, num_warps
=1, num_stages=1)
buf59 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused_add_clamp_19[grid(8)](buf59, 8, XBLOCK=8,
num_warps=1, num_stages=1)
buf62 = empty_strided_cuda((8,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20[grid(8)](buf62,
8, XBLOCK=8, num_warps=1, num_stages=1)
buf64 = empty_strided_cuda((8, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20[grid(8)](buf64,
8, XBLOCK=8, num_warps=1, num_stages=1)
buf61 = reinterpret_tensor(buf17, (4, 512, 8, 8), (32768, 64, 8, 1), 0)
del buf17
buf65 = buf61
del buf61
buf66 = buf65
del buf65
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21[
grid(131072)](buf66, buf57, buf58, buf55, buf54, primals_29,
buf56, buf59, buf62, buf64, 131072, XBLOCK=512, num_warps=8,
num_stages=1)
del buf54
del primals_29
buf67 = extern_kernels.convolution(buf66, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf67, (4, 256, 8, 8), (16384, 64, 8, 1))
buf68 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_leaky_relu_22[grid(65536)](buf67,
primals_31, buf68, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf69 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.float32)
triton_poi_fused_cat_23[grid(131072)](buf68, buf67, primals_31,
buf26, buf69, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del buf67
del primals_31
buf70 = extern_kernels.convolution(buf69, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 256, 8, 8), (16384, 64, 8, 1))
buf71 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_leaky_relu_22[grid(65536)](buf70,
primals_33, buf71, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf72 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_24[grid(16)](buf72, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf73 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_25[grid(16)](buf73, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf74 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_24[grid(16)](buf74, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf75 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused_add_clamp_25[grid(16)](buf75, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf78 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26[grid(16)](buf78,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf80 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26[grid(16)](buf80,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf77 = reinterpret_tensor(buf10, (4, 256, 16, 16), (65536, 256, 16,
1), 0)
del buf10
buf81 = buf77
del buf77
buf82 = buf81
del buf81
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27[
grid(262144)](buf82, buf73, buf74, buf71, buf70, primals_33,
buf72, buf75, buf78, buf80, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_33
buf83 = extern_kernels.convolution(buf82, primals_34, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1))
buf84 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_28[grid(131072)](buf83,
primals_35, buf84, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
buf85 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
torch.float32)
triton_poi_fused_cat_29[grid(262144)](buf84, buf83, primals_35,
buf19, buf85, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
del buf83
del primals_35
buf86 = extern_kernels.convolution(buf85, primals_36, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 128, 16, 16), (32768, 256, 16, 1))
buf87 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_28[grid(131072)](buf86,
primals_37, buf87, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
buf88 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_30[grid(32)](buf88, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf89 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_31[grid(32)](buf89, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf90 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_30[grid(32)](buf90, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf91 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused_add_clamp_31[grid(32)](buf91, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf94 = empty_strided_cuda((32,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32[grid(32)](buf94,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf96 = empty_strided_cuda((32, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32[grid(32)](buf96,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf93 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024,
32, 1), 0)
del buf3
buf97 = buf93
del buf93
buf98 = buf97
del buf97
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33[
grid(524288)](buf98, buf89, buf90, buf87, buf86, primals_37,
buf88, buf91, buf94, buf96, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf86
del primals_37
buf99 = extern_kernels.convolution(buf98, primals_38, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf100 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_34[grid(262144)](buf99,
primals_39, buf100, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf101 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.float32)
triton_poi_fused_cat_35[grid(524288)](buf100, buf99, primals_39,
buf12, buf101, 524288, XBLOCK=512, num_warps=8, num_stages=1)
del buf99
del primals_39
buf102 = extern_kernels.convolution(buf101, primals_40, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf102, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf103 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_34[grid(262144)](buf102,
primals_41, buf103, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf104 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_36[grid(64)](buf104, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf105 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_37[grid(64)](buf105, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf106 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_36[grid(64)](buf106, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf107 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_37[grid(64)](buf107, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf110 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38[grid(64)](buf110,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf112 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38[grid(64)](buf112,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf109 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
buf113 = buf109
del buf109
buf114 = buf113
del buf113
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39[
grid(1048576)](buf114, buf105, buf106, buf103, buf102,
primals_41, buf104, buf107, buf110, buf112, 1048576, XBLOCK=
1024, num_warps=4, num_stages=1)
del buf102
del primals_41
buf115 = extern_kernels.convolution(buf114, primals_42, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf116 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_40[grid(524288)](buf115,
primals_43, buf116, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
buf117 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_41[grid(1048576)](buf116, buf115, primals_43,
buf5, buf117, 1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_43
buf118 = extern_kernels.convolution(buf117, primals_44, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf119 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf120 = buf115
del buf115
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf118,
primals_45, buf119, buf120, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf118
del primals_45
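# Output head: 3x3 convolution down to outChannels, fused with bias-add +
# LeakyReLU(0.1).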
buf121 = extern_kernels.convolution(buf120, primals_46, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf121, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf122 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.bool)
buf123 = reinterpret_tensor(buf70, (4, 4, 64, 64), (16384, 4096, 64,
1), 0)
del buf70
triton_poi_fused_convolution_leaky_relu_42[grid(65536)](buf121,
primals_47, buf122, buf123, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del buf121
del primals_47
return (buf123, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, primals_32, primals_34, primals_36, primals_38,
primals_40, primals_42, primals_44, primals_46, buf1, buf2, buf4,
buf5, buf6, buf8, buf9, buf11, buf12, buf13, buf15, buf16, buf18,
buf19, buf20, buf22, buf23, buf25, buf26, buf27, buf29, buf30,
buf32, buf33, buf34, buf36, buf37, buf39, buf40, buf41, buf42,
buf43, buf46, buf48, buf50, buf52, buf53, buf55, buf56, buf57,
buf58, buf59, buf62, buf64, buf66, buf68, buf69, buf71, buf72,
buf73, buf74, buf75, buf78, buf80, buf82, buf84, buf85, buf87,
buf88, buf89, buf90, buf91, buf94, buf96, buf98, buf100, buf101,
buf103, buf104, buf105, buf106, buf107, buf110, buf112, buf114,
buf116, buf117, buf119, buf120, buf122)
class down(nn.Module):
def __init__(self, inChannels, outChannels, filterSize):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used as input and output channels for the
second convolutional layer.
filterSize : int
            filter size for the convolution filter. Input N would create
            an N x N filter.
"""
super(down, self).__init__()
        self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))
        self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))
def forward(self, x):
""" Forward. """
x = F.avg_pool2d(x, 2)
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
return x
class up(nn.Module):
def __init__(self, inChannels, outChannels):
super(up, self).__init__()
self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1,
padding=1)
def forward(self, x, skpCn):
""" Forward . """
x = F.interpolate(x, scale_factor=2, mode='bilinear')
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)),
negative_slope=0.1)
return x
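# Channel bookkeeping for `up`: after the 2x bilinear upsample, conv1 maps
# inChannels -> outChannels and the skip tensor contributes another
# outChannels via torch.cat, which is why conv2 expects 2 * outChannels.
# Illustrative shape walk-through (sizes assumed, not taken from the repo):
#   x: (N, 512, 4, 4) --interpolate--> (N, 512, 8, 8)
#   conv1 -> (N, 256, 8, 8); cat with skpCn (N, 256, 8, 8) -> (N, 512, 8, 8)
#   conv2 -> (N, 256, 8, 8)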
class UNetNew(nn.Module):
def __init__(self, inChannels, outChannels):
super(UNetNew, self).__init__()
self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)
self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.down1.conv1.weight
primals_7 = self.down1.conv1.bias
primals_8 = self.down1.conv2.weight
primals_9 = self.down1.conv2.bias
primals_10 = self.down2.conv1.weight
primals_11 = self.down2.conv1.bias
primals_12 = self.down2.conv2.weight
primals_13 = self.down2.conv2.bias
primals_14 = self.down3.conv1.weight
primals_15 = self.down3.conv1.bias
primals_16 = self.down3.conv2.weight
primals_17 = self.down3.conv2.bias
primals_18 = self.down4.conv1.weight
primals_19 = self.down4.conv1.bias
primals_20 = self.down4.conv2.weight
primals_21 = self.down4.conv2.bias
primals_22 = self.down5.conv1.weight
primals_23 = self.down5.conv1.bias
primals_24 = self.down5.conv2.weight
primals_25 = self.down5.conv2.bias
primals_26 = self.up1.conv1.weight
primals_27 = self.up1.conv1.bias
primals_28 = self.up1.conv2.weight
primals_29 = self.up1.conv2.bias
primals_30 = self.up2.conv1.weight
primals_31 = self.up2.conv1.bias
primals_32 = self.up2.conv2.weight
primals_33 = self.up2.conv2.bias
primals_34 = self.up3.conv1.weight
primals_35 = self.up3.conv1.bias
primals_36 = self.up3.conv2.weight
primals_37 = self.up3.conv2.bias
primals_38 = self.up4.conv1.weight
primals_39 = self.up4.conv1.bias
primals_40 = self.up4.conv2.weight
primals_41 = self.up4.conv2.bias
primals_42 = self.up5.conv1.weight
primals_43 = self.up5.conv1.bias
primals_44 = self.up5.conv2.weight
primals_45 = self.up5.conv2.bias
primals_46 = self.conv3.weight
primals_47 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47])
return output[0]
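# Minimal smoke-test sketch for the compiled wrapper (hypothetical values;
# assumes a CUDA device, since `call` pins device 0):
if __name__ == "__main__":
    net = UNetNew(4, 4).cuda()
    x = torch.rand(4, 4, 64, 64, device="cuda")
    assert net(x).shape == (4, 4, 64, 64)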
| delldu/VideoSlow | UNet | false | 6,711 | [
"MIT"
] | 1 | 2badbbfa2187ea15ea37ef35e70a103ef98c1e33 | https://github.com/delldu/VideoSlow/tree/2badbbfa2187ea15ea37ef35e70a103ef98c1e33 | import torch
from torch.functional import F
import torch.nn as nn
import torch.nn.functional as F
class down(nn.Module):
def __init__(self, inChannels, outChannels, filterSize):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used as input and output channels for the
second convolutional layer.
filterSize : int
            filter size for the convolution filter. Input N would create
            an N x N filter.
"""
super().__init__()
        self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))
        self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize,
            stride=1, padding=int((filterSize - 1) / 2))
def forward(self, x):
""" Forward. """
x = F.avg_pool2d(x, 2)
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
return x
class up(nn.Module):
def __init__(self, inChannels, outChannels):
super().__init__()
self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1,
padding=1)
def forward(self, x, skpCn):
""" Forward . """
x = F.interpolate(x, scale_factor=2, mode='bilinear')
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)),
negative_slope=0.1)
return x
class Model(nn.Module):
def __init__(self, inChannels, outChannels):
super().__init__()
self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)
self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)
def forward(self, x):
""" Forward."""
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
s1 = F.leaky_relu(self.conv2(x), negative_slope=0.1)
s2 = self.down1(s1)
s3 = self.down2(s2)
s4 = self.down3(s3)
s5 = self.down4(s4)
x = self.down5(s5)
x = self.up1(x, s5)
x = self.up2(x, s4)
x = self.up3(x, s3)
x = self.up4(x, s2)
x = self.up5(x, s1)
x = F.leaky_relu(self.conv3(x), negative_slope=0.1)
return x
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [4, 4]
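# Quick eager smoke test using the helpers above (CPU suffices; the five
# down/up stages return the input resolution unchanged):
if __name__ == "__main__":
    model = Model(*get_init_inputs())
    out = model(*get_inputs())
    assert out.shape == (4, 4, 64, 64)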
|
ConstantODE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/lz/clzgdfddrozy5odymngj4cdkrvkdttixpcmxu2nsxxlqvfkk3aed.py
# Topologically Sorted Source Nodes: [mul, add, sub, pow_1, add_1], Original ATen: [aten.mul, aten.add, aten.sub, aten.pow]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# pow_1 => pow_1
# sub => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_4, %add), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %pow_1), kwargs = {})
triton_poi_fused_add_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_add_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp3 = tl.load(in_ptr2 + (x0), xmask)
tmp5 = tl.load(in_ptr3 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp4 = tmp1 * tmp3
tmp7 = tmp4 + tmp6
tmp8 = tmp2 - tmp7
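    # Fifth power by repeated multiplication: with d = y - (a*t + b) (tmp8),
    # tmp9 = d^2, tmp10 = d^4, tmp11 = d^5.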
tmp9 = tmp8 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp10 * tmp8
tmp12 = tmp1 + tmp11
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (), ())
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, add, sub, pow_1, add_1], Original ATen: [aten.mul, aten.add, aten.sub, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_pow_sub_0.run(primals_1, primals_4, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, primals_3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class ConstantODE(torch.nn.Module):
def __init__(self):
super(ConstantODE, self).__init__()
self.a = torch.nn.Parameter(torch.tensor(0.2))
self.b = torch.nn.Parameter(torch.tensor(3.0))
def forward(self, t, y):
return self.a + (y - (self.a * t + self.b)) ** 5
def y_exact(self, t):
return self.a * t + self.b
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr2 + x0, xmask)
tmp5 = tl.load(in_ptr3 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp4 = tmp1 * tmp3
tmp7 = tmp4 + tmp6
tmp8 = tmp2 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp10 * tmp8
tmp12 = tmp1 + tmp11
tl.store(out_ptr0 + x0, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (), ())
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sub_0[grid(256)](primals_1, primals_4,
primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
return buf0, primals_1, primals_2, primals_3, primals_4
class ConstantODENew(torch.nn.Module):
def __init__(self):
super(ConstantODENew, self).__init__()
self.a = torch.nn.Parameter(torch.tensor(0.2))
self.b = torch.nn.Parameter(torch.tensor(3.0))
def y_exact(self, t):
return self.a * t + self.b
def forward(self, input_0, input_1):
primals_1 = self.a
primals_3 = self.b
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| gaozhihan/torchdiffeq | ConstantODE | false | 6,712 | [
"MIT"
] | 1 | 414781617d595ba01cc3f23382e25ab890f4ca66 | https://github.com/gaozhihan/torchdiffeq/tree/414781617d595ba01cc3f23382e25ab890f4ca66 | import torch
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = torch.nn.Parameter(torch.tensor(0.2))
self.b = torch.nn.Parameter(torch.tensor(3.0))
def forward(self, t, y):
return self.a + (y - (self.a * t + self.b)) ** 5
def y_exact(self, t):
return self.a * t + self.b
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
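# Integration sketch (assumes this repo's odeint entry point; values are
# illustrative): a solver trajectory started on y_exact should track it.
if __name__ == "__main__":
    from torchdiffeq import odeint
    ode = Model()
    t = torch.linspace(1.0, 2.0, 10)
    y = odeint(ode, ode.y_exact(t[:1]), t)
    print((y.squeeze(-1) - ode.y_exact(t)).abs().max())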
|
SineODE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bk/cbkiytchil7drgaq6pb5atrw5yi2nhe4ukuncsiopwphvtjq2hra.py
# Topologically Sorted Source Nodes: [mul, truediv, pow_1, mul_1, sin, mul_2, add, pow_2, sub, pow_3, mul_3, add_1], Original ATen: [aten.mul, aten.div, aten.pow, aten.sin, aten.add, aten.sub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# sin => sin
# sub => sub
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 2), kwargs = {})
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%mul_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %sin), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %mul_2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %pow_2), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_3, 4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %mul_3), kwargs = {})
triton_poi_fused_add_div_mul_pow_sin_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sin_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sin_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sin_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 / tmp3
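    # Matching the source formula: tmp0 holds y and tmp3 holds t, so
    # tmp4 = 2*y/t; powers of t are built by repeated multiplication
    # (tmp5 = t**2, tmp6 = t**4, tmp12 = t**3).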
tmp5 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp3 * tmp1
tmp8 = tl_math.sin(tmp7)
tmp9 = tmp6 * tmp8
tmp10 = tmp4 + tmp9
tmp11 = tmp10 - tmp5
tmp12 = tmp5 * tmp3
tmp13 = 4.0
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tl.store(out_ptr0 + (x0), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, pow_1, mul_1, sin, mul_2, add, pow_2, sub, pow_3, mul_3, add_1], Original ATen: [aten.mul, aten.div, aten.pow, aten.sin, aten.add, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sin_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
class SineODE(torch.nn.Module):
def forward(self, t, y):
return 2 * y / t + t ** 4 * torch.sin(2 * t) - t ** 2 + 4 * t ** 3
def y_exact(self, t):
return -0.5 * t ** 4 * torch.cos(2 * t) + 0.5 * t ** 3 * torch.sin(
2 * t) + 0.25 * t ** 2 * torch.cos(2 * t) - t ** 3 + 2 * t ** 4 + (
math.pi - 0.25) * t ** 2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sin_sub_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 / tmp3
tmp5 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp3 * tmp1
tmp8 = tl_math.sin(tmp7)
tmp9 = tmp6 * tmp8
tmp10 = tmp4 + tmp9
tmp11 = tmp10 - tmp5
tmp12 = tmp5 * tmp3
tmp13 = 4.0
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tl.store(out_ptr0 + x0, tmp15, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sin_sub_0[grid(256)](arg0_1,
arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class SineODENew(torch.nn.Module):
def y_exact(self, t):
return -0.5 * t ** 4 * torch.cos(2 * t) + 0.5 * t ** 3 * torch.sin(
2 * t) + 0.25 * t ** 2 * torch.cos(2 * t) - t ** 3 + 2 * t ** 4 + (
math.pi - 0.25) * t ** 2
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| gaozhihan/torchdiffeq | SineODE | false | 6,713 | [
"MIT"
] | 1 | 414781617d595ba01cc3f23382e25ab890f4ca66 | https://github.com/gaozhihan/torchdiffeq/tree/414781617d595ba01cc3f23382e25ab890f4ca66 | import math
import torch
class Model(torch.nn.Module):
def forward(self, t, y):
return 2 * y / t + t ** 4 * torch.sin(2 * t) - t ** 2 + 4 * t ** 3
def y_exact(self, t):
return -0.5 * t ** 4 * torch.cos(2 * t) + 0.5 * t ** 3 * torch.sin(
2 * t) + 0.25 * t ** 2 * torch.cos(2 * t) - t ** 3 + 2 * t ** 4 + (
math.pi - 0.25) * t ** 2
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
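# Consistency sketch (illustrative values): the autograd derivative of
# y_exact should agree with forward(t, y_exact(t)), since y_exact solves
# the ODE.
if __name__ == "__main__":
    ode = Model()
    t = torch.linspace(1.0, 2.0, 5, requires_grad=True)
    y = ode.y_exact(t)
    dydt, = torch.autograd.grad(y.sum(), t)
    print((dydt - ode.forward(t, y.detach())).abs().max())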
|
ResidualBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wx/cwxggww5mfxi3uxu4pue4rwqhhp7r7zav5gn74a5qqtrbtm3ooj5.py
# Topologically Sorted Source Nodes: [tanh, sigmoid, z], Original ATen: [aten.tanh, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# tanh => tanh
# z => mul
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_2,), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_5,), kwargs = {})
# %mul : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 2
x2 = (xindex // 8)
x4 = xindex % 8
x3 = xindex
tmp10 = tl.load(in_ptr0 + (x0 + (7*x1) + (28*x2)), xmask)
tmp11 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (14 + x0 + (7*x1) + (28*x2)), xmask)
tmp23 = tl.load(in_ptr1 + (2 + x1), xmask, eviction_policy='evict_last')
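    # Gated activation layout: offset 0 addresses the tanh half (channels
    # 0-1) and offset 14 (= 2 channels * stride 7) the sigmoid half
    # (channels 2-3) of the conv output; the x0 < 4 mask keeps only the
    # first `length` of the 7 output positions.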
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + (7*x1) + (28*x2)), tmp2 & xmask, other=0.0)
tmp4 = tl.load(in_ptr1 + (x1), tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp5 = tmp3 + tmp4
tmp6 = tl.load(in_ptr2 + (x4 + (16*x2)), tmp2 & xmask, other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp2, tmp7, tmp8)
tmp12 = tmp10 + tmp11
tmp13 = tl.where(tmp2, tmp9, tmp12)
tmp14 = libdevice.tanh(tmp13)
tmp15 = tl.load(in_ptr0 + (14 + x0 + (7*x1) + (28*x2)), tmp2 & xmask, other=0.0)
tmp16 = tl.load(in_ptr1 + (2 + x1), tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = tl.load(in_ptr2 + (8 + x4 + (16*x2)), tmp2 & xmask, other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp2, tmp19, tmp20)
tmp24 = tmp22 + tmp23
tmp25 = tl.where(tmp2, tmp21, tmp24)
tmp26 = tl.sigmoid(tmp25)
tmp27 = tmp14 * tmp26
tl.store(out_ptr0 + (x3), tmp14, xmask)
tl.store(out_ptr1 + (x3), tmp26, xmask)
tl.store(out_ptr2 + (x3), tmp27, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/us/custepuaxcgmeci6okpmedesxdvzu45qqfj6xlln2yvyowwq2gle.py
# Topologically Sorted Source Nodes: [conv1d_1, residual], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# conv1d_1 => convolution_1
# residual => add_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_5, %primals_6, [1], [0], [1], False, [0], 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_1 = async_compile.triton('triton_poi_fused_add_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lf/clf7hs52i4bd5d3e73uio27ntyjfqmszkbsw6dta3r6rzgeftva3.py
# Topologically Sorted Source Nodes: [skip_connection], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# skip_connection => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_7, %primals_8, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 2, 1), (2, 1, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 2, 1), (2, 1, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7), (28, 7, 1))
buf1 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh, sigmoid, z], Original ATen: [aten.tanh, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_tanh_0.run(buf0, primals_3, primals_4, buf1, buf2, buf3, 32, grid=grid(32), stream=stream0)
del buf0
del primals_3
del primals_4
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv1d_1, residual], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_1.run(buf5, primals_6, primals_1, 64, grid=grid(64), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [skip_connection], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf3, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4), (16, 4, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [skip_connection], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf7, primals_8, 64, grid=grid(64), stream=stream0)
del primals_8
return (buf5, buf7, primals_1, primals_2, primals_5, primals_7, buf1, buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 2, 1), (2, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 2, 1), (2, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class ResidualBlock(nn.Module):
def __init__(self, filter_size, dilation, residual_channels,
dilated_channels, skip_channels):
super().__init__()
self.conv = nn.Conv1d(residual_channels, dilated_channels,
kernel_size=filter_size, padding=dilation * (filter_size - 1),
dilation=dilation)
self.res = nn.Conv1d(dilated_channels // 2, residual_channels, 1)
self.skip = nn.Conv1d(dilated_channels // 2, skip_channels, 1)
self.filter_size = filter_size
self.dilation = dilation
self.residual_channels = residual_channels
def forward(self, x, condition):
length = x.shape[2]
h = self.conv(x)
h = h[:, :, :length]
h += condition
tanh_z, sig_z = torch.split(h, h.size(1) // 2, dim=1)
z = torch.tanh(tanh_z) * torch.sigmoid(sig_z)
if x.shape[2] == z.shape[2]:
residual = self.res(z) + x
else:
residual = self.res(z) + x[:, :, -1:]
skip_connection = self.skip(z)
return residual, skip_connection
def initialize(self, n):
self.queue = torch.zeros((n, self.residual_channels, self.dilation *
(self.filter_size - 1) + 1), dtype=self.conv.weight.dtype)
self.conv.padding = 0
def pop(self, condition):
return self(self.queue, condition)
def push(self, x):
self.queue = torch.cat((self.queue[:, :, 1:], x), dim=2)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'filter_size': 4, 'dilation': 1, 'residual_channels': 4,
'dilated_channels': 4, 'skip_channels': 4}]
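# Streaming usage sketch (sizes are illustrative): the queue caches the
# causal receptive field, so each pop() emits exactly one new timestep.
if __name__ == "__main__":
    block = ResidualBlock(filter_size=2, dilation=1, residual_channels=4,
        dilated_channels=4, skip_channels=4)
    block.initialize(n=1)
    for _ in range(3):
        block.push(torch.randn(1, 4, 1))
        residual, skip = block.pop(torch.zeros(1, 4, 1))
        assert residual.shape == (1, 4, 1) and skip.shape == (1, 4, 1)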
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 2
x2 = xindex // 8
x4 = xindex % 8
x3 = xindex
tmp10 = tl.load(in_ptr0 + (x0 + 7 * x1 + 28 * x2), xmask)
tmp11 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (14 + x0 + 7 * x1 + 28 * x2), xmask)
tmp23 = tl.load(in_ptr1 + (2 + x1), xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + 7 * x1 + 28 * x2), tmp2 & xmask, other=0.0)
tmp4 = tl.load(in_ptr1 + x1, tmp2 & xmask, eviction_policy='evict_last',
other=0.0)
tmp5 = tmp3 + tmp4
tmp6 = tl.load(in_ptr2 + (x4 + 16 * x2), tmp2 & xmask, other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp2, tmp7, tmp8)
tmp12 = tmp10 + tmp11
tmp13 = tl.where(tmp2, tmp9, tmp12)
tmp14 = libdevice.tanh(tmp13)
tmp15 = tl.load(in_ptr0 + (14 + x0 + 7 * x1 + 28 * x2), tmp2 & xmask,
other=0.0)
tmp16 = tl.load(in_ptr1 + (2 + x1), tmp2 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = tl.load(in_ptr2 + (8 + x4 + 16 * x2), tmp2 & xmask, other=0.0)
tmp19 = tmp17 + tmp18
tmp20 = tl.full(tmp19.shape, 0.0, tmp19.dtype)
tmp21 = tl.where(tmp2, tmp19, tmp20)
tmp24 = tmp22 + tmp23
tmp25 = tl.where(tmp2, tmp21, tmp24)
tmp26 = tl.sigmoid(tmp25)
tmp27 = tmp14 * tmp26
tl.store(out_ptr0 + x3, tmp14, xmask)
tl.store(out_ptr1 + x3, tmp26, xmask)
tl.store(out_ptr2 + x3, tmp27, xmask)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 2, 1), (2, 1, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 2, 1), (2, 1, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(3,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7), (28, 7, 1))
buf1 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_tanh_0[grid(32)](buf0, primals_3,
primals_4, buf1, buf2, buf3, 32, XBLOCK=32, num_warps=1,
num_stages=1)
del buf0
del primals_3
del primals_4
buf4 = extern_kernels.convolution(buf3, primals_5, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_1[grid(64)](buf5, primals_6,
primals_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_6
buf6 = extern_kernels.convolution(buf3, primals_7, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4), (16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_2[grid(64)](buf7, primals_8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
return (buf5, buf7, primals_1, primals_2, primals_5, primals_7, buf1,
buf2, buf3)
class ResidualBlockNew(nn.Module):
def __init__(self, filter_size, dilation, residual_channels,
dilated_channels, skip_channels):
super().__init__()
self.conv = nn.Conv1d(residual_channels, dilated_channels,
kernel_size=filter_size, padding=dilation * (filter_size - 1),
dilation=dilation)
self.res = nn.Conv1d(dilated_channels // 2, residual_channels, 1)
self.skip = nn.Conv1d(dilated_channels // 2, skip_channels, 1)
self.filter_size = filter_size
self.dilation = dilation
self.residual_channels = residual_channels
def initialize(self, n):
self.queue = torch.zeros((n, self.residual_channels, self.dilation *
(self.filter_size - 1) + 1), dtype=self.conv.weight.dtype)
self.conv.padding = 0
def pop(self, condition):
return self(self.queue, condition)
def push(self, x):
self.queue = torch.cat((self.queue[:, :, 1:], x), dim=2)
def forward(self, input_0, input_1):
primals_1 = self.conv.weight
primals_3 = self.conv.bias
primals_5 = self.res.weight
primals_6 = self.res.bias
primals_7 = self.skip.weight
primals_8 = self.skip.bias
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
| fukuroder/pytorch_lightning_wavenet | ResidualBlock | false | 6,714 | [
"MIT"
] | 1 | 440ef4092397998edf0df4625f1f10157db2243e | https://github.com/fukuroder/pytorch_lightning_wavenet/tree/440ef4092397998edf0df4625f1f10157db2243e | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, filter_size, dilation, residual_channels,
dilated_channels, skip_channels):
super().__init__()
self.conv = nn.Conv1d(residual_channels, dilated_channels,
kernel_size=filter_size, padding=dilation * (filter_size - 1),
dilation=dilation)
self.res = nn.Conv1d(dilated_channels // 2, residual_channels, 1)
self.skip = nn.Conv1d(dilated_channels // 2, skip_channels, 1)
self.filter_size = filter_size
self.dilation = dilation
self.residual_channels = residual_channels
def forward(self, x, condition):
length = x.shape[2]
h = self.conv(x)
h = h[:, :, :length]
h += condition
tanh_z, sig_z = torch.split(h, h.size(1) // 2, dim=1)
z = torch.tanh(tanh_z) * torch.sigmoid(sig_z)
if x.shape[2] == z.shape[2]:
residual = self.res(z) + x
else:
residual = self.res(z) + x[:, :, -1:]
skip_connection = self.skip(z)
return residual, skip_connection
def initialize(self, n):
self.queue = torch.zeros((n, self.residual_channels, self.dilation *
(self.filter_size - 1) + 1), dtype=self.conv.weight.dtype)
self.conv.padding = 0
def pop(self, condition):
return self(self.queue, condition)
def push(self, x):
self.queue = torch.cat((self.queue[:, :, 1:], x), dim=2)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'filter_size': 4, 'dilation': 1, 'residual_channels': 4,
'dilated_channels': 4, 'skip_channels': 4}]
|
EqualConvTranspose2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/mr/cmrzofxtfa5fe3ax4o3n5qvgpvhbgcrspjauzarmp4t443npav4h.py
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# weight => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.1767766952966369), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.1767766952966369
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ct/cctgcjr2wf4z7vvek4wfd2w6r2vedlixvdnsojtj2li6o37bj4na.py
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv_transpose2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %mul, %primals_2, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 49) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weight], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 7, 7), (196, 49, 7, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_2, 784, grid=grid(784), stream=stream0)
del primals_2
return (buf2, buf0, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from math import sqrt
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualConvTranspose2d(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.ConvTranspose2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input):
return self.conv(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from math import sqrt
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
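    # equal-lr scale from EqualLR.compute_weight: sqrt(2 / fan_in) with
    # fan_in = weight.size(1) * weight[0][0].numel() = 4 * 16 = 64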
tmp1 = 0.1767766952966369
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
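    # in-place bias add on the (4, 4, 7, 7) transposed-conv output;
    # x1 = (flat index // 49) % 4 picks the output channel (49 = 7 * 7 spatial positions)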
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 49 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(primals_3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 7, 7), (196, 49, 7, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(784)](buf2, primals_2, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf2, buf0, primals_3, buf0
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class EqualConvTranspose2dNew(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.ConvTranspose2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input_0):
primals_2 = self.conv.bias
primals_1 = self.conv.weight_orig
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| g33sean/RTIL | EqualConvTranspose2d | false | 6,715 | [
"BSD-2-Clause",
"MIT"
] | 1 | 5325f6d5e3ddf7579b6bd8199898e00eff3da631 | https://github.com/g33sean/RTIL/tree/5325f6d5e3ddf7579b6bd8199898e00eff3da631 | import torch
from torch import nn
from math import sqrt
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
class Model(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
conv = nn.ConvTranspose2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input):
return self.conv(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
CombineSlices | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xj/cxjyiylkxhhw7puycpyh4rhisqin43l5kgucj4lyyniez2ypunvk.py
# Topologically Sorted Source Nodes: [index_select], Original ATen: [aten.index_select]
# Source node to ATen node mapping:
# index_select => index
# Graph fragment:
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, None, %full_default]), kwargs = {})
triton_poi_fused_index_select_0 = async_compile.triton('triton_poi_fused_index_select_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_select_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_select_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [index_select], Original ATen: [aten.index_select]
stream0 = get_raw_stream(0)
triton_poi_fused_index_select_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
class CombineSlices(nn.Module):
def __init__(self, slice_dim=2):
super().__init__()
self.slice_dim = slice_dim
def forward(self, x):
return torch.index_select(x, dim=self.slice_dim, index=torch.tensor
(0, device=x.device))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_select_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
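    # index_select along dim 2 at index 0: for the (4, 4, 1, 4) output, the read
    # offset x0 + 16 * x1 skips the dim-2 stride of 4, keeping only slice 0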
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_select_0[grid(64)](arg0_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class CombineSlicesNew(nn.Module):
def __init__(self, slice_dim=2):
super().__init__()
self.slice_dim = slice_dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| gbosdet/fastMRI | CombineSlices | false | 6,716 | [
"MIT"
] | 1 | 7f94f8006f8919d98fb87788b6dadec9a58d1a3a | https://github.com/gbosdet/fastMRI/tree/7f94f8006f8919d98fb87788b6dadec9a58d1a3a | import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
class Model(nn.Module):
def __init__(self, slice_dim=2):
super().__init__()
self.slice_dim = slice_dim
def forward(self, x):
return torch.index_select(x, dim=self.slice_dim, index=torch.tensor
(0, device=x.device))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ResBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3j/c3jk4fd45xsskb354bmqh5ayvalm334wxs72twddoal7gsrew3wi.py
# Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm => add, add_1, mul_1, rsqrt, var_mean
# out => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_per_fused_native_group_norm_relu_0 = async_compile.triton('triton_per_fused_native_group_norm_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + (16*x0)), tmp29, xmask)
tl.store(out_ptr3 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/43/c43iah2ujzdzlzvirc5zcusvrhdz3liemhgusdpro5bcmzekdxpa.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add_4
# Graph fragment:
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0.run(primals_1, primals_2, primals_3, buf0, buf3, buf12, 16, 16, grid=grid(16), stream=stream0)
del primals_2
del primals_3
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.native_group_norm, aten.relu]
triton_per_fused_native_group_norm_relu_0.run(buf4, primals_5, primals_6, buf5, buf9, buf8, 16, 16, grid=grid(16), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf11, primals_1, 256, grid=grid(256), stream=stream0)
return (buf11, primals_1, primals_4, primals_5, primals_7, buf3, buf4, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf12, (4, 4, 1), (4, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ResBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResBlock, self).__init__()
self.norm1 = norm(inplanes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.conv1 = conv3x3(inplanes, planes, stride)
self.norm2 = norm(planes)
self.conv2 = conv3x3(planes, planes)
def forward(self, x):
shortcut = x
out = self.relu(self.norm1(x))
if self.downsample is not None:
shortcut = self.downsample(out)
out = self.conv1(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(out)
return out + shortcut
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
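    # fused GroupNorm + affine + ReLU: one program row per (sample, group); with 4
    # groups over 4 channels, each group is one channel x 16 spatial elements.
    # Writes the mean (out_ptr0), the normalized + ReLU output (out_ptr2), and the
    # rsqrt of the variance (out_ptr3), the latter two reused in the backward pass.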
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp29, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
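    # residual connection: adds the block input (shortcut) to the conv2 output in place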
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0[grid(16)](primals_1,
primals_2, primals_3, buf0, buf3, buf12, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_2
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_per_fused_native_group_norm_relu_0[grid(16)](buf4, primals_5,
primals_6, buf5, buf9, buf8, 16, 16, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_6
buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf11 = buf10
del buf10
triton_poi_fused_add_1[grid(256)](buf11, primals_1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return (buf11, primals_1, primals_4, primals_5, primals_7, buf3, buf4,
reinterpret_tensor(buf5, (4, 4), (4, 1), 0), reinterpret_tensor(
buf8, (4, 4), (4, 1), 0), buf9, reinterpret_tensor(buf0, (4, 4, 1),
(4, 1, 1), 0), reinterpret_tensor(buf12, (4, 4, 1), (4, 1, 1), 0))
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class ResBlockNew(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResBlockNew, self).__init__()
self.norm1 = norm(inplanes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.conv1 = conv3x3(inplanes, planes, stride)
self.norm2 = norm(planes)
self.conv2 = conv3x3(planes, planes)
def forward(self, input_0):
primals_2 = self.norm1.weight
primals_3 = self.norm1.bias
primals_4 = self.conv1.weight
primals_5 = self.norm2.weight
primals_6 = self.norm2.bias
primals_7 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| gaozhihan/torchdiffeq | ResBlock | false | 6,717 | [
"MIT"
] | 1 | 414781617d595ba01cc3f23382e25ab890f4ca66 | https://github.com/gaozhihan/torchdiffeq/tree/414781617d595ba01cc3f23382e25ab890f4ca66 | import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def norm(dim):
return nn.GroupNorm(min(32, dim), dim)
class Model(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super().__init__()
self.norm1 = norm(inplanes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.conv1 = conv3x3(inplanes, planes, stride)
self.norm2 = norm(planes)
self.conv2 = conv3x3(planes, planes)
def forward(self, x):
shortcut = x
out = self.relu(self.norm1(x))
if self.downsample is not None:
shortcut = self.downsample(out)
out = self.conv1(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(out)
return out + shortcut
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ConcatConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cb/ccbis3vvjrqsccylnjrwg7mqmq5kwjkv22g642kbfi72exsoiplk.py
# Topologically Sorted Source Nodes: [ttx], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# ttx => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %primals_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 5
x0 = xindex % 16
x2 = (xindex // 80)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-1) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_3, (4, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ttx], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 320, grid=grid(320), stream=stream0)
del primals_1
del primals_2
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_4, 64, grid=grid(64), stream=stream0)
del primals_4
return (buf2, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 5, 3, 3), (45, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ConcatConv2d(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super(ConcatConv2d, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(dim_in + 1, dim_out, kernel_size=ksize, stride
=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias)
def forward(self, t, x):
tt = torch.ones_like(x[:, :1, :, :]) * t
ttx = torch.cat([tt, x], 1)
return self._layer(ttx)
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
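    # torch.cat([tt, x], dim=1): channel 0 of the 5-channel output comes from tt
    # (in_ptr0), channels 1-4 from x (in_ptr1)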
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 5
x0 = xindex % 16
x2 = xindex // 80
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
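    # in-place bias add on the (4, 4, 2, 2) conv output;
    # x1 = (flat index // 4) % 4 picks the output channel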
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_3, (4, 5, 3, 3), (45, 9, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(320)](primals_2, primals_1, buf0, 320,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(64)](buf2, primals_4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
return buf2, primals_3, buf0
class ConcatConv2dNew(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super(ConcatConv2dNew, self).__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(dim_in + 1, dim_out, kernel_size=ksize, stride
=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias)
def forward(self, input_0, input_1):
primals_3 = self._layer.weight
primals_4 = self._layer.bias
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| gaozhihan/torchdiffeq | ConcatConv2d | false | 6,718 | [
"MIT"
] | 1 | 414781617d595ba01cc3f23382e25ab890f4ca66 | https://github.com/gaozhihan/torchdiffeq/tree/414781617d595ba01cc3f23382e25ab890f4ca66 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
dilation=1, groups=1, bias=True, transpose=False):
super().__init__()
module = nn.ConvTranspose2d if transpose else nn.Conv2d
self._layer = module(dim_in + 1, dim_out, kernel_size=ksize, stride
=stride, padding=padding, dilation=dilation, groups=groups,
bias=bias)
def forward(self, t, x):
tt = torch.ones_like(x[:, :1, :, :]) * t
ttx = torch.cat([tt, x], 1)
return self._layer(ttx)
def get_inputs():
return [torch.rand([4, 1, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/tf/ctfrmkdzcncuvqa3lx5gfprtbhmrpnkdxzqqmfnra2srkxlmy2kn.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/re/creookpdgr2hf34ub6gmeaguf32rxk6p3rlylk2rt2cuu4sg2o5z.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.view]
# Source node to ATen node mapping:
# out_2 => view_7
# Graph fragment:
# %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 20]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = (xindex // 20)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (20*x1) + (80*((x1 % 4) // 4)) + (320*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (20, 4), (4, 1))
assert_size_stride(primals_2, (20, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 20), (20, 1))
assert_size_stride(primals_5, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 20), (320, 80, 20, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 1280, grid=grid(1280), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 1280, grid=grid(1280), stream=stream0)
del buf1
buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (20, 2), (1, 20), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4, 2), (32, 8, 2, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((20, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2, 20), (20, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Decoder(nn.Module):
def __init__(self, latent_dim=4, obs_dim=2, nhidden=20):
super(Decoder, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(latent_dim, nhidden)
self.fc2 = nn.Linear(nhidden, obs_dim)
def forward(self, z):
out = self.fc1(z)
out = self.relu(out)
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
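    # fused bias add + ReLU for fc1; also stores the (activation <= 0) mask
    # consumed by the backward threshold pass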
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 20
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 20
x1 = xindex // 20
x2 = xindex
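    # for these shapes both correction terms vanish (x1 % 4 // 4 == 0 and
    # (4 * (x1 // 4 % 4) + x1 % 4) // 16 == 0), so this is a contiguous copy of the
    # (4, 4, 4, 20) ReLU output into (64, 20) for the fc2 matmul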
tmp0 = tl.load(in_ptr0 + (x0 + 20 * x1 + 80 * (x1 % 4 // 4) + 320 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (20, 4), (4, 1))
assert_size_stride(primals_2, (20,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 20), (20, 1))
assert_size_stride(primals_5, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 20), (320, 80, 20, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1280)](buf1,
primals_2, buf4, 1280, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
triton_poi_fused_view_1[grid(1280)](buf1, buf2, 1280, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(20, 2), (1, 20), 0), alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4, 2), (32, 8, 2, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, primals_4, buf4
class DecoderNew(nn.Module):
def __init__(self, latent_dim=4, obs_dim=2, nhidden=20):
super(DecoderNew, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(latent_dim, nhidden)
self.fc2 = nn.Linear(nhidden, obs_dim)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| gaozhihan/torchdiffeq | Decoder | false | 6,719 | [
"MIT"
] | 1 | 414781617d595ba01cc3f23382e25ab890f4ca66 | https://github.com/gaozhihan/torchdiffeq/tree/414781617d595ba01cc3f23382e25ab890f4ca66 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, latent_dim=4, obs_dim=2, nhidden=20):
super().__init__()
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(latent_dim, nhidden)
self.fc2 = nn.Linear(nhidden, obs_dim)
def forward(self, z):
out = self.fc1(z)
out = self.relu(out)
out = self.fc2(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MaskedHuberLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/aj/cajreabiivhhfnqxbo2lekarksjbftn42yofupch5c6wecsnbiwa.py
# Topologically Sorted Source Nodes: [mul, mul_1, l, l_1, mask], Original ATen: [aten.mul, aten.smooth_l1_loss, aten.sum]
# Source node to ATen node mapping:
# l => abs_1, div, lt, mul_2, pow_1, sub, sub_1, where
# l_1 => sum_1
# mask => sum_2
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %arg1_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, 1.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div, %sub_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where, [1, 2]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [1, 2]), kwargs = {})
triton_per_fused_mul_smooth_l1_loss_sum_0 = async_compile.triton('triton_per_fused_mul_smooth_l1_loss_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_smooth_l1_loss_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_smooth_l1_loss_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = (xindex // 4)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0)
tmp3 = tl.load(in_ptr2 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl_math.abs(tmp5)
tmp7 = 1.0
tmp8 = tmp6 < tmp7
tmp9 = tmp6 * tmp6
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = tmp11 * tmp7
tmp13 = tmp6 - tmp10
tmp14 = tl.where(tmp8, tmp12, tmp13)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp21 = tl.where(xmask, tmp19, 0)
tmp22 = tl.sum(tmp21, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp18, xmask)
tl.store(out_ptr1 + (x3), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jv/cjvwzsp2e5riecbgn5gapgn4wd2iy25kqjhiqfzeo5ogp47lu3jg.py
# Topologically Sorted Source Nodes: [l_2, mean], Original ATen: [aten.div, aten.mean]
# Source node to ATen node mapping:
# l_2 => div_1
# mean => mean
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div_1,), kwargs = {})
triton_per_fused_div_mean_1 = async_compile.triton('triton_per_fused_div_mean_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mean_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 / tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp7, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1, l, l_1, mask], Original ATen: [aten.mul, aten.smooth_l1_loss, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_smooth_l1_loss_sum_0.run(arg0_1, arg1_1, arg2_1, buf0, buf1, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [l_2, mean], Original ATen: [aten.div, aten.mean]
triton_per_fused_div_mean_1.run(buf3, buf0, buf1, 1, 16, grid=grid(1), stream=stream0)
del buf0
del buf1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MaskedHuberLoss(torch.nn.Module):
def __init__(self):
super(MaskedHuberLoss, self).__init__()
def forward(self, output, labels, mask):
lossHuber = nn.SmoothL1Loss(reduction='none')
l = lossHuber(output * mask, labels * mask)
l = l.sum(dim=(1, 2))
mask = mask.sum(dim=(1, 2))
l = l / mask
return l.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
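# Minimal usage sketch (assumption: the demo function below is not part of
# the original file). With an all-ones mask the result equals SmoothL1Loss
# summed over dims (1, 2), divided by the mask sum (16 here), then averaged.
def _masked_huber_demo():
    loss_fn = MaskedHuberLoss()
    out = loss_fn(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4),
        torch.ones(4, 4, 4, 4))
    return out  # 0-dim tensor: mean over the (4, 4) per-slice losses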
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_smooth_l1_loss_sum_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp3 = tl.load(in_ptr2 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tl_math.abs(tmp5)
tmp7 = 1.0
tmp8 = tmp6 < tmp7
tmp9 = tmp6 * tmp6
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = tmp11 * tmp7
tmp13 = tmp6 - tmp10
tmp14 = tl.where(tmp8, tmp12, tmp13)
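    # tmp14 is SmoothL1 with beta = 1.0: 0.5 * d**2 where |d| < 1, else |d| - 0.5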
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp21 = tl.where(xmask, tmp19, 0)
tmp22 = tl.sum(tmp21, 1)[:, None]
tl.store(out_ptr0 + x3, tmp18, xmask)
tl.store(out_ptr1 + x3, tmp22, xmask)
@triton.jit
def triton_per_fused_div_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 / tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
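    # divide by 16: the final mean runs over the 16 masked-loss ratios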
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
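        # buf0 holds the per-slice Huber sums and buf1 the matching mask
        # sums; both are (4, 4), one entry per (dim0, dim3) pair of the inputs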
get_raw_stream(0)
triton_per_fused_mul_smooth_l1_loss_sum_0[grid(16)](arg0_1, arg1_1,
arg2_1, buf0, buf1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_div_mean_1[grid(1)](buf3, buf0, buf1, 1, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class MaskedHuberLossNew(torch.nn.Module):
def __init__(self):
super(MaskedHuberLossNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| gabrieleangeletti/GndNet | MaskedHuberLoss | false | 6,720 | [
"MIT"
] | 1 | 323af65c9c16a725805f480ff799936b77b04d53 | https://github.com/gabrieleangeletti/GndNet/tree/323af65c9c16a725805f480ff799936b77b04d53 | import torch
import torch.nn as nn
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, output, labels, mask):
lossHuber = nn.SmoothL1Loss(reduction='none')
l = lossHuber(output * mask, labels * mask)
l = l.sum(dim=(1, 2))
mask = mask.sum(dim=(1, 2))
l = l / mask
return l.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
Netleaky | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ky/cky64l574tkwxzjewzevqyhty73x4t3q4p6d2tu2humfvstjwiaa.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xc/cxcj5l7s6w5ttrq2fk2nirlbp44yesw6n2m2dnxtpcjjmym2njhr.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 32), (32, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (64, 32), (32, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (4, 64), (64, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf0 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf9, 2048, grid=grid(2048), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf2 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf8, 2048, grid=grid(2048), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 64), (1, 32), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf4 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf5, primals_7, buf7, 4096, grid=grid(4096), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 64), (64, 1), 0), reinterpret_tensor(primals_8, (64, 4), (1, 64), 0), alpha=1, beta=1, out=buf6)
del primals_9
return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(buf5, (64, 64), (64, 1), 0), primals_8, buf7, primals_6, buf8, primals_4, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Netleaky(nn.Module):
def __init__(self, input_dim, output_dim):
super(Netleaky, self).__init__()
self.linear1 = nn.Linear(input_dim, 32)
self.linear2 = nn.Linear(32, 32)
self.linear3 = nn.Linear(32, 64)
self.linear4 = nn.Linear(64, output_dim)
def forward(self, x):
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = F.relu(self.linear3(x))
x = self.linear4(x)
return x
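# Observation (comment only, no behavioral change): despite the "leaky"
# name, this model applies plain F.relu; the fused Triton kernels above
# bake that ReLU and its backward mask into the bias add.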
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
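    # tmp4 = relu(x + bias); tmp6 is the <= 0 mask that threshold_backward
    # reuses in the backward pass, so forward output and grad mask are fused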
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 32), (32, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32), (32, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (4, 64), (64, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1,
primals_2, buf9, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 32), (32, 1), 0),
reinterpret_tensor(primals_4, (32, 32), (1, 32), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf3,
primals_5, buf8, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0),
reinterpret_tensor(primals_6, (32, 64), (1, 32), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf4
        buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(4096)](buf5,
primals_7, buf7, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_8, (64, 4), (1, 64), 0),
alpha=1, beta=1, out=buf6)
del primals_9
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(
buf3, (64, 32), (32, 1), 0), reinterpret_tensor(buf5, (64, 64), (64,
1), 0), primals_8, buf7, primals_6, buf8, primals_4, buf9
class NetleakyNew(nn.Module):
def __init__(self, input_dim, output_dim):
super(NetleakyNew, self).__init__()
self.linear1 = nn.Linear(input_dim, 32)
self.linear2 = nn.Linear(32, 32)
self.linear3 = nn.Linear(32, 64)
self.linear4 = nn.Linear(64, output_dim)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_8 = self.linear4.weight
primals_9 = self.linear4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| gautam-sharma1/openRL | Netleaky | false | 6,721 | [
"MIT"
] | 1 | 14310a97a328fe5682a01ee85d83a6b5e1ae29ca | https://github.com/gautam-sharma1/openRL/tree/14310a97a328fe5682a01ee85d83a6b5e1ae29ca | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input_dim, output_dim):
super().__init__()
self.linear1 = nn.Linear(input_dim, 32)
self.linear2 = nn.Linear(32, 32)
self.linear3 = nn.Linear(32, 64)
self.linear4 = nn.Linear(64, output_dim)
def forward(self, x):
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = F.relu(self.linear3(x))
x = self.linear4(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Cartesian | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/v4/cv4ff55kvqufmlwbha3zwifl2iiainuigc3ecu7srjssgsdlhdbq.py
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], -1), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl_math.cos(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 2, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr0 + (4*x1), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr0 + (1 + (4*x1)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl_math.sin(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + (x2), tmp20, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
class Cartesian(nn.Module):
def forward(self, x):
r, phi = x[..., 0], x[..., 1]
return torch.stack((r * torch.cos(phi), r * torch.sin(phi)), dim=-1)
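# Quick sanity sketch (illustrative only; not part of the original module):
# only the first two trailing components are read, so r = 1, phi = pi / 2
# maps to roughly (0, 1).
def _cartesian_demo():
    import math  # local import keeps the sketch self-contained
    pt = torch.tensor([[1.0, math.pi / 2, 0.0, 0.0]])  # extra dims ignored
    return Cartesian()(pt)  # tensor close to [[0., 1.]]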
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
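    # x0 in {0, 1} picks the output component (r*cos(phi) vs r*sin(phi));
    # r and phi sit at channel offsets 0 and 1 of each 4-wide input row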
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl_math.cos(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp14 = tl.load(in_ptr0 + 4 * x1, tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr0 + (1 + 4 * x1), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tl_math.sin(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class CartesianNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| gbosdet/fastMRI | Cartesian | false | 6,722 | [
"MIT"
] | 1 | 7f94f8006f8919d98fb87788b6dadec9a58d1a3a | https://github.com/gbosdet/fastMRI/tree/7f94f8006f8919d98fb87788b6dadec9a58d1a3a | import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
class Model(nn.Module):
def forward(self, x):
r, phi = x[..., 0], x[..., 1]
return torch.stack((r * torch.cos(phi), r * torch.sin(phi)), dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 256), (256, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 16384, grid=grid(16384), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 4), (1, 256), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self, input_dim, output_dim):
super(Net, self).__init__()
self.linear1 = nn.Linear(input_dim, 256)
self.linear2 = nn.Linear(256, output_dim)
def forward(self, x):
x = F.relu(self.linear1(x))
x = self.linear2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 256), (256, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0)
        del buf0
buf3 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf3, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 256),
(256, 1), 0), reinterpret_tensor(primals_4, (256, 4), (1, 256),
0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), primals_4, buf3
class NetNew(nn.Module):
def __init__(self, input_dim, output_dim):
super(NetNew, self).__init__()
self.linear1 = nn.Linear(input_dim, 256)
self.linear2 = nn.Linear(256, output_dim)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| gautam-sharma1/openRL | Net | false | 6,723 | [
"MIT"
] | 1 | 14310a97a328fe5682a01ee85d83a6b5e1ae29ca | https://github.com/gautam-sharma1/openRL/tree/14310a97a328fe5682a01ee85d83a6b5e1ae29ca | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input_dim, output_dim):
super().__init__()
self.linear1 = nn.Linear(input_dim, 256)
self.linear2 = nn.Linear(256, output_dim)
def forward(self, x):
x = F.relu(self.linear1(x))
x = self.linear2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
NN_2layer_regression | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# out => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
return (reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class NN_2layer_regression(nn.Module):
def __init__(self, input_dim, interm_dim1, interm_dim2):
super().__init__()
self.d = input_dim
self.interm_dim1 = interm_dim1
self.interm_dim2 = interm_dim2
self.fc1 = nn.Linear(input_dim, interm_dim1)
self.fc2 = nn.Linear(interm_dim1, interm_dim2)
self.fc3 = nn.Linear(interm_dim2, 1)
self.modules_sizes = [self.d * self.interm_dim1, self.interm_dim1,
self.interm_dim1 * self.interm_dim2, self.interm_dim2, self.
interm_dim2, 1]
self.w_opt = None
self.alpha = None
self.mixed_linead_weights = None
def set_mixed_linear_weights(self, w):
self.mixed_linead_weights = self.alpha * w + (1 - self.alpha
) * self.w_opt
self.mixed_linead_weights.retain_grad()
fc_parameters = torch.split(self.mixed_linead_weights, self.
modules_sizes)
ind = 0
for module in self.modules():
if type(module) == nn.Linear:
module.weight = torch.nn.Parameter(fc_parameters[ind].view(
module.weight.shape))
ind += 1
module.bias = torch.nn.Parameter(fc_parameters[ind].view(
module.bias.shape))
ind += 1
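    # Note (added for clarity): the convex combination alpha * w +
    # (1 - alpha) * w_opt mixes a flat candidate weight vector with a stored
    # reference solution. modules_sizes lists the flattened sizes of
    # (weight, bias) for fc1, fc2, fc3 in order, so torch.split recovers
    # each layer's parameters from the mixed vector.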
def forward(self, x, w=None):
if w is not None:
assert w.requires_grad
assert self.alpha is not None
assert self.w_opt is not None
self.set_mixed_linear_weights(w)
out = torch.tanh(self.fc1(x))
out = torch.tanh(self.fc2(out))
out = self.fc3(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'interm_dim1': 4, 'interm_dim2': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_6, primals_4
class NN_2layer_regressionNew(nn.Module):
def __init__(self, input_dim, interm_dim1, interm_dim2):
super().__init__()
self.d = input_dim
self.interm_dim1 = interm_dim1
self.interm_dim2 = interm_dim2
self.fc1 = nn.Linear(input_dim, interm_dim1)
self.fc2 = nn.Linear(interm_dim1, interm_dim2)
self.fc3 = nn.Linear(interm_dim2, 1)
self.modules_sizes = [self.d * self.interm_dim1, self.interm_dim1,
self.interm_dim1 * self.interm_dim2, self.interm_dim2, self.
interm_dim2, 1]
self.w_opt = None
self.alpha = None
self.mixed_linead_weights = None
def set_mixed_linear_weights(self, w):
self.mixed_linead_weights = self.alpha * w + (1 - self.alpha
) * self.w_opt
self.mixed_linead_weights.retain_grad()
fc_parameters = torch.split(self.mixed_linead_weights, self.
modules_sizes)
ind = 0
for module in self.modules():
if type(module) == nn.Linear:
module.weight = torch.nn.Parameter(fc_parameters[ind].view(
module.weight.shape))
ind += 1
module.bias = torch.nn.Parameter(fc_parameters[ind].view(
module.bias.shape))
ind += 1
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| gaseln/FLIX_small_scale_experiments | NN_2layer_regression | false | 6,724 | [
"MIT"
] | 1 | af9ebd7f192fc0f67a6a94af7939fd3d6f548bd6 | https://github.com/gaseln/FLIX_small_scale_experiments/tree/af9ebd7f192fc0f67a6a94af7939fd3d6f548bd6 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, input_dim, interm_dim1, interm_dim2):
super().__init__()
self.d = input_dim
self.interm_dim1 = interm_dim1
self.interm_dim2 = interm_dim2
self.fc1 = nn.Linear(input_dim, interm_dim1)
self.fc2 = nn.Linear(interm_dim1, interm_dim2)
self.fc3 = nn.Linear(interm_dim2, 1)
self.modules_sizes = [self.d * self.interm_dim1, self.interm_dim1,
self.interm_dim1 * self.interm_dim2, self.interm_dim2, self.
interm_dim2, 1]
self.w_opt = None
self.alpha = None
self.mixed_linead_weights = None
def set_mixed_linear_weights(self, w):
self.mixed_linead_weights = self.alpha * w + (1 - self.alpha
) * self.w_opt
self.mixed_linead_weights.retain_grad()
fc_parameters = torch.split(self.mixed_linead_weights, self.
modules_sizes)
ind = 0
for module in self.modules():
if type(module) == nn.Linear:
module.weight = torch.nn.Parameter(fc_parameters[ind].view(
module.weight.shape))
ind += 1
module.bias = torch.nn.Parameter(fc_parameters[ind].view(
module.bias.shape))
ind += 1
def forward(self, x, w=None):
if w is not None:
assert w.requires_grad
assert self.alpha is not None
assert self.w_opt is not None
self.set_mixed_linear_weights(w)
out = torch.tanh(self.fc1(x))
out = torch.tanh(self.fc2(out))
out = self.fc3(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
HighwayLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [gate_output], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# gate_output => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
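# Added note: the kernel above computes only the numerator of a numerically
# stable softmax over dim=1, i.e. exp(x - max(x)) across the four entries at
# offsets {0, 16, 32, 48} within each 64-element batch; the division by the
# row sum is deferred to (and fused into) the next kernel.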
# kernel path: runs/run_shard_4/inductor_cache/3v/c3vcvf5mcfw3jy7grlk23jx64xlbsyodas33i5qo4yxxd3nicv2m.py
# Topologically Sorted Source Nodes: [transform_output, gate_output, transformation_part, sub, carry_part, add], Original ATen: [aten.relu, aten._softmax, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# add => add
# carry_part => mul_1
# gate_output => div, sum_1
# sub => sub_1
# transform_output => relu
# transformation_part => mul
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %div), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %primals_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused__softmax_add_mul_relu_rsub_1 = async_compile.triton('triton_poi_fused__softmax_add_mul_relu_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_relu_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_relu_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x3), xmask)
tmp15 = tl.load(in_ptr2 + (x3), xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp11 * tmp8
tmp13 = 1.0
tmp14 = tmp13 - tmp8
tmp16 = tmp14 * tmp15
tmp17 = tmp12 + tmp16
tl.store(in_out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gate_output], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [transform_output, gate_output, transformation_part, sub, carry_part, add], Original ATen: [aten.relu, aten._softmax, aten.mul, aten.rsub, aten.add]
triton_poi_fused__softmax_add_mul_relu_rsub_1.run(buf4, buf2, buf0, primals_3, 256, grid=grid(256), stream=stream0)
del buf2
return (buf4, primals_3, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.onnx.operators
class HighwayLayer(nn.Module):
def __init__(self, input_dim, transform_activation=F.relu,
gate_activation=F.softmax, gate_bias=-2):
super().__init__()
self.highway_transform_activation = transform_activation
self.highway_gate_activation = gate_activation
self.highway_transform = nn.Linear(input_dim, input_dim)
self.highway_gate = nn.Linear(input_dim, input_dim)
self.highway_gate.bias.data.fill_(gate_bias)
def forward(self, x):
transform_output = self.highway_transform_activation(self.
highway_transform(x))
gate_output = self.highway_gate_activation(self.highway_gate(x))
transformation_part = torch.mul(transform_output, gate_output)
carry_part = torch.mul(1 - gate_output, x)
return torch.add(transformation_part, carry_part)
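    # Added note: this is the highway connection
    # y = H(x) * T(x) + x * (1 - T(x)), with transform H = relu(fc(x)) and
    # gate T = softmax(fc(x)) here. The gate_bias=-2 default follows the
    # sigmoid-gate convention of highway networks (bias the gate toward the
    # carry path at initialization); with a softmax gate, a uniform bias
    # shift leaves the gate output itself unchanged.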
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
import torch.nn as nn
import torch.onnx.operators
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_add_mul_relu_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + x3, xmask)
tmp15 = tl.load(in_ptr2 + x3, xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp11 * tmp8
tmp13 = 1.0
tmp14 = tmp13 - tmp8
tmp16 = tmp14 * tmp15
tmp17 = tmp12 + tmp16
tl.store(in_out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = buf3
del buf3
triton_poi_fused__softmax_add_mul_relu_rsub_1[grid(256)](buf4, buf2,
buf0, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf2
return buf4, primals_3, buf0, buf1
class HighwayLayerNew(nn.Module):
def __init__(self, input_dim, transform_activation=F.relu,
gate_activation=F.softmax, gate_bias=-2):
super().__init__()
self.highway_transform_activation = transform_activation
self.highway_gate_activation = gate_activation
self.highway_transform = nn.Linear(input_dim, input_dim)
self.highway_gate = nn.Linear(input_dim, input_dim)
self.highway_gate.bias.data.fill_(gate_bias)
def forward(self, input_0):
primals_1 = self.highway_transform.weight
primals_2 = self.highway_transform.bias
primals_4 = self.highway_gate.weight
primals_5 = self.highway_gate.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| gardenia22/translate | HighwayLayer | false | 6,725 | [
"BSD-3-Clause"
] | 1 | 0be57c8f55b52fc9d39197efa02e05d1c1cda024 | https://github.com/gardenia22/translate/tree/0be57c8f55b52fc9d39197efa02e05d1c1cda024 | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.onnx.operators
class Model(nn.Module):
def __init__(self, input_dim, transform_activation=F.relu,
gate_activation=F.softmax, gate_bias=-2):
super().__init__()
self.highway_transform_activation = transform_activation
self.highway_gate_activation = gate_activation
self.highway_transform = nn.Linear(input_dim, input_dim)
self.highway_gate = nn.Linear(input_dim, input_dim)
self.highway_gate.bias.data.fill_(gate_bias)
def forward(self, x):
transform_output = self.highway_transform_activation(self.
highway_transform(x))
gate_output = self.highway_gate_activation(self.highway_gate(x))
transformation_part = torch.mul(transform_output, gate_output)
carry_part = torch.mul(1 - gate_output, x)
return torch.add(transformation_part, carry_part)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
DPSLTMAdapter | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/l4/cl4boort6vfsvh6h6bfd4lck36kbmtipkqcrnhckuuxer6sfib77.py
# Topologically Sorted Source Nodes: [h_0s], Original ATen: [aten.zeros]
# Source node to ATen node mapping:
# h_0s => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 1, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_zeros_0 = async_compile.triton('triton_poi_fused_zeros_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sw/cswlmqxq57h53l6axfd6psb5uckvpd32zawfihpwp3s4owvqpcsb.py
# Topologically Sorted Source Nodes: [i_t, f_t, g_t, o_t, mul, mul_1, c_t, tanh_1, h_t], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add, aten.sigmoid_backward]
# Source node to ATen node mapping:
# c_t => add_1
# f_t => sigmoid_1
# g_t => tanh
# h_t => mul_2
# i_t => sigmoid
# mul => mul
# mul_1 => mul_1
# o_t => sigmoid_2
# tanh_1 => tanh_1
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_4,), kwargs = {})
# %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_5,), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_6,), kwargs = {})
# %sigmoid_2 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_7,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %squeeze), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {})
# %mul_2 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_2, %tanh_1), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid_1), kwargs = {})
# %mul_71 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub_18), kwargs = {})
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr4 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp16 + tmp17
tmp21 = tmp19 + tmp20
tmp22 = tmp18 + tmp21
tmp23 = libdevice.tanh(tmp22)
tmp26 = tmp24 + tmp25
tmp29 = tmp27 + tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.sigmoid(tmp30)
tmp33 = tmp31 * tmp32
tmp34 = tmp7 * tmp23
tmp35 = tmp33 + tmp34
tmp36 = 1.0
tmp37 = tmp36 - tmp31
tmp38 = tmp31 * tmp37
tmp39 = libdevice.tanh(tmp35)
tmp40 = tmp15 * tmp39
tl.store(out_ptr0 + (x2), tmp7, xmask)
tl.store(out_ptr1 + (x2), tmp15, xmask)
tl.store(out_ptr2 + (x2), tmp23, xmask)
tl.store(out_ptr3 + (x2), tmp35, xmask)
tl.store(out_ptr4 + (x2), tmp38, xmask)
tl.store(out_ptr5 + (x2), tmp40, xmask)
''', device_str='cuda')
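# Added note: the kernel above fuses one LSTM cell update. With gate
# pre-activations taken from x @ W_ih^T + b_ih + h @ W_hh^T + b_hh, it
# computes i = sigmoid(.), f = sigmoid(.), g = tanh(.), o = sigmoid(.),
# then c' = f * c + i * g and h' = o * tanh(c'), plus the sigmoid-backward
# term f * (1 - f) saved for the autograd pass.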
# kernel path: runs/run_shard_4/inductor_cache/tl/ctlt2p573ni7yx4xldsz64sjwqkudoxkn6d6yvb2r54qxlmxds5p.py
# Topologically Sorted Source Nodes: [i_t_1, f_t_1, g_t_1, o_t_1, mul_3, mul_4, c_t_1, tanh_3, h_t_1], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add]
# Source node to ATen node mapping:
# c_t_1 => add_3
# f_t_1 => sigmoid_4
# g_t_1 => tanh_2
# h_t_1 => mul_5
# i_t_1 => sigmoid_3
# mul_3 => mul_3
# mul_4 => mul_4
# o_t_1 => sigmoid_5
# tanh_3 => tanh_3
# Graph fragment:
# %sigmoid_3 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_8,), kwargs = {})
# %sigmoid_4 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_9,), kwargs = {})
# %tanh_2 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_10,), kwargs = {})
# %sigmoid_5 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_11,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_4, %add_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_3, %tanh_2), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %tanh_3 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_3,), kwargs = {})
# %mul_5 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_5, %tanh_3), kwargs = {})
triton_poi_fused_add_mul_sigmoid_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp17 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
tmp20 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp25 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
tmp28 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr4 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp16 + tmp17
tmp21 = tmp19 + tmp20
tmp22 = tmp18 + tmp21
tmp23 = tl.sigmoid(tmp22)
tmp26 = tmp24 + tmp25
tmp29 = tmp27 + tmp28
tmp30 = tmp26 + tmp29
tmp31 = libdevice.tanh(tmp30)
tmp33 = tmp15 * tmp32
tmp34 = tmp7 * tmp31
tmp35 = tmp33 + tmp34
tmp36 = libdevice.tanh(tmp35)
tmp37 = tmp23 * tmp36
tl.store(out_ptr0 + (x2), tmp7, xmask)
tl.store(out_ptr1 + (x2), tmp15, xmask)
tl.store(out_ptr2 + (x2), tmp23, xmask)
tl.store(out_ptr3 + (x2), tmp31, xmask)
tl.store(out_ptr4 + (x2), tmp35, xmask)
tl.store(out_ptr5 + (x2), tmp37, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ks/cksi7m4m7yxvsvd2ce7ftnos6exaibxyudnraey6nxajdfw246hj.py
# Topologically Sorted Source Nodes: [i_t_3, f_t_3, g_t_3, mul_9, mul_10, c_t_3, tanh_7], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add]
# Source node to ATen node mapping:
# c_t_3 => add_7
# f_t_3 => sigmoid_10
# g_t_3 => tanh_6
# i_t_3 => sigmoid_9
# mul_10 => mul_10
# mul_9 => mul_9
# tanh_7 => tanh_7
# Graph fragment:
# %sigmoid_9 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_16,), kwargs = {})
# %sigmoid_10 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_17,), kwargs = {})
# %tanh_6 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem_18,), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_10, %add_5), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_9, %tanh_6), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %mul_10), kwargs = {})
# %tanh_7 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_7,), kwargs = {})
triton_poi_fused_add_mul_sigmoid_tanh_3 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr4 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp16 + tmp17
tmp21 = tmp19 + tmp20
tmp22 = tmp18 + tmp21
tmp23 = libdevice.tanh(tmp22)
tmp25 = tmp15 * tmp24
tmp26 = tmp7 * tmp23
tmp27 = tmp25 + tmp26
tmp28 = libdevice.tanh(tmp27)
tl.store(out_ptr0 + (x2), tmp7, xmask)
tl.store(out_ptr1 + (x2), tmp15, xmask)
tl.store(out_ptr2 + (x2), tmp23, xmask)
tl.store(out_ptr3 + (x2), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/o5/co5z4awunowwwrh5of536l26ozrjuwdhph5f2zpgdrfapcc3dduk.py
# Topologically Sorted Source Nodes: [o_t_3], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# o_t_3 => sigmoid_11
# Graph fragment:
# %sigmoid_11 : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_19,), kwargs = {})
triton_poi_fused_sigmoid_4 = async_compile.triton('triton_poi_fused_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vm/cvmcreohmmmgtc2sngri4exy6k5t5hutelgjbv763z3d5cnripkm.py
# Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# h_n => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul_2, %mul_5, %mul_8, %mul_11],), kwargs = {})
triton_poi_fused_stack_5 = async_compile.triton('triton_poi_fused_stack_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (4*((-4) + x1))), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + (4*((-8) + x1))), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0)
tmp20 = tl.load(in_ptr4 + (x0 + (4*((-12) + x1))), tmp16 & xmask, other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp16, tmp21, tmp22)
tmp24 = tl.where(tmp14, tmp15, tmp23)
tmp25 = tl.where(tmp9, tmp10, tmp24)
tmp26 = tl.where(tmp4, tmp5, tmp25)
tl.store(out_ptr0 + (x2), tmp26, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 1, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_0s], Original ATen: [aten.zeros]
stream0 = get_raw_stream(0)
triton_poi_fused_zeros_0.run(buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [i_t, f_t, g_t, o_t, mul, mul_1, c_t, tanh_1, h_t], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add, aten.sigmoid_backward]
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1.run(buf1, primals_3, buf2, primals_5, buf0, buf3, buf5, buf4, buf6, buf32, buf7, 16, grid=grid(16), stream=stream0)
buf8 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf8)
buf9 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf7, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [i_t_1, f_t_1, g_t_1, o_t_1, mul_3, mul_4, c_t_1, tanh_3, h_t_1], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add]
triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf8, primals_3, buf9, primals_5, buf6, buf10, buf11, buf13, buf12, buf14, buf15, 16, grid=grid(16), stream=stream0)
buf16 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf16)
buf17 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf15, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf17)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [i_t_2, f_t_2, g_t_2, o_t_2, mul_6, mul_7, c_t_2, tanh_5, h_t_2], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add]
triton_poi_fused_add_mul_sigmoid_tanh_2.run(buf16, primals_3, buf17, primals_5, buf14, buf18, buf19, buf21, buf20, buf22, buf23, 16, grid=grid(16), stream=stream0)
buf24 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf24)
del primals_2
buf25 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf23, reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf25)
buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [i_t_3, f_t_3, g_t_3, mul_9, mul_10, c_t_3, tanh_7], Original ATen: [aten.sigmoid, aten.tanh, aten.mul, aten.add]
triton_poi_fused_add_mul_sigmoid_tanh_3.run(buf24, primals_3, buf25, primals_5, buf22, buf26, buf27, buf28, buf30, 16, grid=grid(16), stream=stream0)
buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [o_t_3], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_4.run(buf24, primals_3, buf25, primals_5, buf29, 16, grid=grid(16), stream=stream0)
del buf24
del primals_3
del primals_5
buf31 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0); del buf25 # reuse
# Topologically Sorted Source Nodes: [h_n], Original ATen: [aten.stack]
triton_poi_fused_stack_5.run(buf7, buf15, buf23, buf29, buf30, buf31, 64, grid=grid(64), stream=stream0)
return (reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), buf3, buf4, buf5, buf6, buf7, buf10, buf11, buf12, buf13, buf14, buf15, buf18, buf19, buf20, buf21, buf22, buf23, buf26, buf27, buf28, buf29, buf30, primals_4, buf32, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import Tensor
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from typing import Optional
from typing import Union
from typing import List
from typing import Tuple
from torch.nn.utils.rnn import PackedSequence
from torch.nn.utils.rnn import pack_padded_sequence
from typing import Dict
from torch.nn.modules.module import _IncompatibleKeys
def _compute_last_states(h_n: 'List[torch.Tensor]', c_n:
'List[torch.Tensor]', seq_lengths: 'List[int]') ->Tuple[torch.Tensor,
torch.Tensor]:
"""
Given h and c values of all time steps, this function computes the h and c values for each sequence at their last timestep (this can vary across sequences with different sequence lengths).
Args:
h_n: A list of hidden state values across all timesteps.
c_n: A list of cell state values across all timesteps.
seq_lengths: the length parameter used in the torch.nn.utils.rnn.pack_padded_sequence function to create a PackedSequence. This can be computed using the _compute_seq_lengths function.
Returns:
h_last: Contains the last hidden state values for each of the sequences.
If the i'th sequence has a length of l_i, then h_last[i,:] contains the hidden state corresponding to the i'th sequence at timestep l_i.
c_last: The structure is the same as h_last, except that it contains the last cell state values for each of the sequences.
"""
max_batch_size = len(seq_lengths)
hidden_size = h_n[0].shape[-1]
h_last = torch.zeros(max_batch_size, hidden_size)
c_last = torch.zeros(max_batch_size, hidden_size)
for i, seq_len in enumerate(seq_lengths):
h_last[i, :] = h_n[seq_len - 1][i, :]
c_last[i, :] = c_n[seq_len - 1][i, :]
return h_last, c_last
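# Hedged usage sketch (added for illustration; not part of the original
# source). The tensors and lengths below are made up: two sequences of
# lengths [3, 1] with hidden_size 4, so h_n/c_n hold one [2, 4] tensor per
# timestep and the helper picks row i from timestep seq_lengths[i] - 1.
def _example_compute_last_states():
    h_n = [torch.randn(2, 4) for _ in range(3)]
    c_n = [torch.randn(2, 4) for _ in range(3)]
    h_last, c_last = _compute_last_states(h_n, c_n, [3, 1])
    assert torch.equal(h_last[0], h_n[2][0])  # sequence 0 ends at t = 2
    assert torch.equal(h_last[1], h_n[0][1])  # sequence 1 ends at t = 0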
def _compute_seq_lengths(batch_sizes: 'torch.Tensor') ->List[int]:
"""
Computes the sequence lengths (the length parameter used in the pack_padded_sequence function to create a PackedSequence).
Args:
batch_sizes: Contains the batch sizes as stored in a PackedSequence
Returns:
running_seq_lengths: the length parameter used in the torch.nn.utils.rnn.pack_padded_sequence function to create a PackedSequence.
It's a list with one entry per sequence (its length equals batch_sizes[0], the number of sequences).
"""
max_batch_size = batch_sizes[0]
if len(batch_sizes) == 1:
return [1] * max_batch_size
running_seq = 0
running_seq_lengths = []
for i in range(1, len(batch_sizes)):
delta = batch_sizes[i - 1].item() - batch_sizes[i].item()
running_seq += 1
running_seq_lengths += delta * [running_seq]
running_seq += 1
running_seq_lengths += batch_sizes[-1].item() * [running_seq]
running_seq_lengths.reverse()
return running_seq_lengths
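# Hedged worked example (illustration only): for batch_sizes [3, 2, 2, 1],
# the deltas between consecutive batch sizes yield one sequence of length 1
# and one of length 3, the trailing batch size of 1 yields one of length 4,
# and the final reverse returns the lengths in descending order.
def _example_compute_seq_lengths():
    lengths = _compute_seq_lengths(torch.tensor([3, 2, 2, 1]))
    assert lengths == [4, 3, 1]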
def _concat_sequence_directions(forward:
'Union[List[torch.Tensor], Tuple[torch.Tensor]]', reverse:
'Union[List[torch.Tensor], Tuple[torch.Tensor]]', dim: 'int') ->Tuple[torch
.Tensor]:
"""
Given two lists/tuples of the same length containing tensors, this function returns their concatenation along dimension dim. So, output[i] is the concatenation of forward[i] and reverse[i] along dimension dim.
forward[i] and reverse[i] should have the same shape. This function is used for concatenating the outputs of the forward and reverse layers of a bidirectional LSTM.
Args:
forward: list/tuple containing n tensors, representing the output of the forward layer.
reverse: list/tuple containing n tensors, representing the output of the backward layer.
dim: the dimension along which the sequence of tensors within forward and reverse will be concatenated.
Returns:
output: list/tuple containing n concatenated tensors.
"""
if len(forward) != len(reverse):
raise ValueError(
'The forward and reverse layer output sequences should have the same length'
)
seq_length = len(forward)
output = [0] * seq_length
for i in range(seq_length):
output[i] = torch.cat((forward[i], reverse[i]), dim=dim)
return output
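# Hedged sketch (illustration only): concatenating per-step forward and
# backward outputs of shape [B, H] along the last dimension yields [B, 2*H].
def _example_concat_sequence_directions():
    fwd = [torch.randn(2, 4) for _ in range(3)]
    rev = [torch.randn(2, 4) for _ in range(3)]
    out = _concat_sequence_directions(fwd, rev, dim=-1)
    assert out[0].shape == (2, 8)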
def filter_out_old_keys(self, state_dict, prefix, local_metadata):
new_state_dict = {param_name: param_value for param_name, param_value in
state_dict.items() if param_name not in self.old_to_new}
return new_state_dict
class LSTMLinear(nn.Linear):
"""
This class is the same as an nn.Linear layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear)
"""
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPLSTMCell(nn.Module):
"""
Internal-only class. Implements *one* step of an LSTM so that an LSTM layer can be seen as repeated
applications of this class.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = LSTMLinear(input_size, 4 * hidden_size, bias=self.bias)
self.hh = LSTMLinear(hidden_size, 4 * hidden_size, bias=self.bias)
self.reset_parameters()
def reset_parameters(self):
"""
Resets parameters by initializing them from a uniform distribution.
"""
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
"""
Sets max batch length
"""
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
def forward(self, x: 'torch.Tensor', h_prev: 'torch.Tensor', c_prev:
'torch.Tensor', batch_size_t: 'Optional[int]'=None) ->Tuple[torch.
Tensor, torch.Tensor]:
if batch_size_t is None:
gates = self.ih(x) + self.hh(h_prev)
else:
gates = self.ih(x) + self.hh(h_prev[:batch_size_t, :])
i_t_input, f_t_input, g_t_input, o_t_input = torch.split(gates,
self.hidden_size, 1)
i_t = torch.sigmoid(i_t_input)
f_t = torch.sigmoid(f_t_input)
g_t = torch.tanh(g_t_input)
o_t = torch.sigmoid(o_t_input)
if batch_size_t is None:
c_t = f_t * c_prev + i_t * g_t
else:
c_t = f_t * c_prev[:batch_size_t, :] + i_t * g_t
h_t = o_t * torch.tanh(c_t)
return h_t, c_t
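# Hedged note (added commentary): the forward above is the standard LSTM
# cell update (biases omitted for brevity):
#   i_t = sigmoid(W_ii x + W_hi h), f_t = sigmoid(W_if x + W_hf h),
#   g_t = tanh(W_ig x + W_hg h),    o_t = sigmoid(W_io x + W_ho h),
#   c_t = f_t * c_prev + i_t * g_t, h_t = o_t * tanh(c_t)
# A minimal single-step sketch, assuming input_size = hidden_size = 4:
def _example_dplstm_cell_step():
    cell = DPLSTMCell(input_size=4, hidden_size=4, bias=True)
    h, c = cell(torch.randn(2, 4), torch.zeros(2, 4), torch.zeros(2, 4))
    assert h.shape == (2, 4) and c.shape == (2, 4)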
class DPLSTMLayer(nn.Module):
"""
Implements *one* layer of LSTM in a way amenable to differential privacy.
We don't expect you to use this directly: use DPLSTM instead :)
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
dropout: 'float', reverse: 'bool'=False):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.dropout = dropout
self.reverse = reverse
self.cell = DPLSTMCell(input_size=input_size, hidden_size=
hidden_size, bias=bias)
self.dropout_layer = nn.Dropout(dropout) if dropout > 0 else None
def set_max_batch_length(self, max_batch_length: 'int') ->None:
"""
Sets max batch length. Useful for PackedSequences
"""
self.cell.set_max_batch_length(max_batch_length)
def forward(self, x: 'Union[torch.Tensor, Tuple]', state_init:
'Tuple[torch.Tensor, torch.Tensor]', batch_sizes:
'Optional[torch.Tensor]'=None) ->Tuple[torch.Tensor, Tuple[torch.
Tensor, torch.Tensor]]:
"""
Implements the forward pass of the DPLSTMLayer when a sequence is given as input.
Args:
x: Input sequence to the DPLSTMCell of shape ``[T, B, D]``.
state_init: Initial state of the LSTMCell as a tuple ``(h_0, c_0)``
where ``h_0`` is the initial hidden state and ``c_0`` is the
initial cell state of the DPLSTMCell
batch_sizes: Contains the batch sizes as stored in PackedSequence
Returns:
``output, (h_n, c_n)`` where ``output`` is of shape ``[T, B, H]`` and is a
tensor containing the output features (``h_t``) from the last layer of the
DPLSTMCell for each timestep ``t``. ``h_n`` is of shape ``[B, H]`` and is a
tensor containing the hidden state for ``t = T``. ``c_n`` is of shape ``[B, H]``
and is a tensor containing the cell state for ``t = T``.
"""
if batch_sizes is not None:
seq_length = batch_sizes.size(0)
if self.reverse:
x = tuple(reversed(x))
batch_sizes = batch_sizes.flip(0)
else:
seq_length, _batch_sz, _ = x.shape
if self.reverse:
x = x.flip(0)
x = torch.unbind(x, dim=0)
h_0, c_0 = state_init
h_n = [h_0]
c_n = [c_0]
batch_size_prev = h_0.shape[0]
for t in range(seq_length):
if batch_sizes is not None:
batch_size_t = batch_sizes[t].item()
delta = batch_size_t - batch_size_prev
if delta > 0:
h_cat = torch.cat((h_n[t], h_0[batch_size_prev:
batch_size_t, :]), 0)
c_cat = torch.cat((c_n[t], c_0[batch_size_prev:
batch_size_t, :]), 0)
h_next, c_next = self.cell(x[t], h_cat, c_cat, batch_size_t
)
else:
h_next, c_next = self.cell(x[t], h_n[t], c_n[t],
batch_size_t)
else:
h_next, c_next = self.cell(x[t], h_n[t], c_n[t])
if self.dropout:
h_next = self.dropout_layer(h_next)
h_n.append(h_next)
c_n.append(c_next)
batch_size_prev = h_next.shape[0]
if batch_sizes is None:
h_n = torch.stack(h_n[1:], dim=0)
return h_n.flip(0) if self.reverse else h_n, (h_n[-1], c_n[-1])
else:
seq_lengths = _compute_seq_lengths(batch_sizes)
h_temp, c_temp = h_n[1:], c_n[1:]
h_last, c_last = _compute_last_states(h_temp, c_temp, seq_lengths)
if self.reverse:
h_temp = tuple(reversed(h_temp))
return h_temp, (h_last, c_last)
class BidirectionalDPLSTMLayer(nn.Module):
"""
Implements *one* layer of Bidirectional LSTM in a way amenable to differential privacy.
We don't expect you to use this directly: use DPLSTM instead :)
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
dropout: 'float'):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.dropout = dropout
self.forward_layer = DPLSTMLayer(input_size=input_size, hidden_size
=hidden_size, bias=bias, dropout=dropout, reverse=False)
self.reverse_layer = DPLSTMLayer(input_size=input_size, hidden_size
=hidden_size, bias=bias, dropout=dropout, reverse=True)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
"""
Sets max batch length
"""
self.forward_layer.set_max_batch_length(max_batch_length)
self.reverse_layer.set_max_batch_length(max_batch_length)
def forward(self, x: 'torch.Tensor', state_init:
'Tuple[torch.Tensor, torch.Tensor]', batch_sizes:
'Optional[torch.Tensor]'=None) ->Tuple[torch.Tensor, Tuple[torch.
Tensor, torch.Tensor]]:
"""
Implements the forward pass of the DPLSTM when a sequence is input.
Dimensions as follows:
- B: Batch size
- T: Sequence length
- D: LSTM input hidden size (eg from a word embedding)
- H: LSTM output hidden size
- P: number of directions (2 if bidirectional, else 1)
Args:
x: Input sequence to the DPLSTM of shape ``[T, B, D]``
state_init: Initial state of the LSTM as a tuple ``(h_0, c_0)``, where
``h_0`` of shape ``[P, B, H]`` contains the initial hidden state, and
``c_0`` of shape ``[P, B, H]`` contains the initial cell state. This
argument can be (and defaults to) None, in which case zero tensors
will be used.
Returns:
``output, (h_n, c_n)`` where ``output`` is of shape ``[T, B, H * P]`` and is a
tensor containing the output features (``h_t``) from the last layer of the
DPLSTM for each timestep ``t``. ``h_n`` is of shape ``[P, B, H]`` and contains
the hidden state for ``t = T``. ``c_n`` is of shape ``[P, B, H]`` and contains
the cell state for ``t = T``.
"""
h0, c0 = state_init
h0_f, h0_r = h0.unbind(0)
c0_f, c0_r = c0.unbind(0)
out_f, (h_f, c_f) = self.forward_layer(x, (h0_f, c0_f), batch_sizes)
out_r, (h_r, c_r) = self.reverse_layer(x, (h0_r, c0_r), batch_sizes)
if batch_sizes is None:
out = torch.cat([out_f, out_r], dim=-1)
else:
out = _concat_sequence_directions(out_f, out_r, -1)
h = torch.stack([h_f, h_r], dim=0)
c = torch.stack([c_f, c_r], dim=0)
return out, (h, c)
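# Hedged shape sketch (illustration only): with H = 4 and a [T=3, B=2, D=4]
# input, out is [3, 2, 8] (forward and reverse features concatenated) while
# h and c are stacked over the two directions into [2, 2, 4].
def _example_bidirectional_layer_shapes():
    layer = BidirectionalDPLSTMLayer(4, 4, bias=True, dropout=0)
    state = (torch.zeros(2, 2, 4), torch.zeros(2, 2, 4))
    out, (h, c) = layer(torch.randn(3, 2, 4), state)
    assert out.shape == (3, 2, 8) and h.shape == (2, 2, 4)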
class ParamRenamedModule(nn.Module):
"""
This class defines a nn.Module whose parameters are renamed. This is useful when you want to
reimplement a layer but make sure its state_dict and list of parameters are exactly the same
as another reference layer so that you can have a drop-in replacement that does not depend on
how your layer is actually implemented. In Opacus, this is used for DPLSTM, where our
implementation leverages submodules and requires alignment to the state_dict of nn.LSTM.
"""
def __init__(self, rename_map: 'Dict[str, str]'):
"""
Initializes internal state. Subclass this instead of ``torch.nn.Module`` whenever you need
to rename your model's state.
Args:
rename_map: mapping from old name -> new name for each parameter you want renamed.
Note that this must be a 1:1 mapping!
"""
super().__init__()
self.old_to_new = rename_map
self.new_to_old = {v: k for k, v in rename_map.items()}
self._register_state_dict_hook(filter_out_old_keys)
def _register_renamed_parameters(self):
"""
Internal function. This function simply registers parameters under their new name. They will
automatically mask their duplicates coming from submodules. This trick works because
self.parameters() proceeds recursively from the top, going into submodules after processing
items at the current level, and will not return duplicates.
"""
for old_name, param in super().named_parameters():
if old_name in self.old_to_new:
new_name = self.old_to_new[old_name]
self.register_parameter(new_name, param)
def __setattr__(self, name: 'str', value: 'Union[Tensor, nn.Module]'
) ->None:
"""
Whenever you set an attribute, eg `self.linear`, this is called to actually register it in
any nn.Module. We rely on the masking trick explained in the docs for
``_register_renamed_parameters`` to make sure we replace things only once. If a new parameter
in the rename list is detected, we rename and mask it so next time this is called we will
no longer find it.
"""
super().__setattr__(name, value)
try:
self._register_renamed_parameters()
except AttributeError:
pass
def load_state_dict(self, state_dict: 'Dict[str, Tensor]', strict:
'bool'=True):
"""
Identical to ``torch.nn.Module.load_state_dict()`` but handles the renamed keys.
"""
missing_keys, unexpected_keys = super().load_state_dict(state_dict,
strict=False)
missing_keys = [k for k in missing_keys if k not in self.old_to_new]
if strict:
error_msgs = []
if len(unexpected_keys) > 0:
error_msgs.insert(0,
'Unexpected key(s) in state_dict: {}. '.format(', '.
join('"{}"'.format(k) for k in unexpected_keys)))
if len(missing_keys) > 0:
error_msgs.insert(0, 'Missing key(s) in state_dict: {}. '.
format(', '.join('"{}"'.format(k) for k in missing_keys)))
if len(error_msgs) > 0:
raise RuntimeError(
'Error(s) in loading state_dict for {}:\n\t{}'.format(
self.__class__.__name__, '\n\t'.join(error_msgs)))
return _IncompatibleKeys(missing_keys, unexpected_keys)
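# Hedged sketch of the renaming trick (illustrative; 'w_l0' is a made-up
# alias, not an Opacus name): a subclass passes old -> new names to
# __init__, and the renamed top-level alias then masks the submodule
# parameter in named_parameters() and in the state_dict.
def _example_param_renamed_module():
    class Wrapped(ParamRenamedModule):
        def __init__(self):
            super().__init__({'inner.weight': 'w_l0'})
            self.inner = nn.Linear(4, 4)
    names = dict(Wrapped().named_parameters())
    assert 'w_l0' in names and 'inner.weight' not in names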
class DPLSTM(ParamRenamedModule):
"""
DP-friendly drop-in replacement of the ``torch.nn.LSTM`` module.
Its state_dict matches that of nn.LSTM exactly, so that after training it can be exported
and loaded by an nn.LSTM for inference.
Refer to nn.LSTM's documentation for all parameters and inputs.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', num_layers:
'int'=1, bias: 'bool'=True, batch_first: 'bool'=False, dropout:
'float'=0, bidirectional: 'bool'=False):
rename_dict = self._make_rename_dict(num_layers, bias, bidirectional)
super().__init__(rename_dict)
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.bidirectional = bidirectional
self.num_directions = 2 if self.bidirectional else 1
LayerClass = BidirectionalDPLSTMLayer if bidirectional else DPLSTMLayer
self.layers = nn.ModuleList([LayerClass(input_size=self.input_size if
i == 0 else self.hidden_size * self.num_directions, hidden_size
=self.hidden_size, bias=self.bias, dropout=self.dropout if i <
self.num_layers - 1 else 0) for i in range(num_layers)])
def forward(self, x: 'Union[torch.Tensor, PackedSequence]', state_init:
'Optional[Tuple[torch.Tensor, torch.Tensor]]'=None) ->Tuple[torch.
Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
Implements the forward pass of the DPLSTM when a sequence is input.
Dimensions as follows:
- B: Batch size
- T: Sequence length
- D: LSTM input hidden size (eg from a word embedding)
- H: LSTM output hidden size
- L: number of layers in the LSTM
- P: number of directions (2 if bidirectional, else 1)
Args:
x: Input sequence to the DPLSTM of shape ``[T, B, D]``. Or it can be a PackedSequence.
state_init: Initial state of the LSTM as a tuple ``(h_0, c_0)``, where:
- ``h_0`` of shape ``[L*P, B, H]`` contains the initial hidden state
- ``c_0`` of shape ``[L*P, B, H]`` contains the initial cell state
This argument can be (and defaults to) None, in which case zero tensors will be used.
Returns:
``output, (h_n, c_n)`` where ``output`` is of shape ``[T, B, H * P]`` and is a
tensor containing the output features (``h_t``) from the last layer of the DPLSTM
for each timestep ``t``. ``h_n`` is of shape ``[L * P, B, H]`` and contains the
hidden state for ``t = T``. ``c_n`` is of shape ``[L * P, B, H]`` and contains
the cell state for ``t = T``.
"""
if isinstance(x, PackedSequence):
x, batch_sizes, sorted_indices, unsorted_indices = x
B = batch_sizes[0].item()
_, _D = x.shape
x = x.split(tuple(batch_sizes))
for layer in self.layers:
layer.set_max_batch_length(B)
else:
sorted_indices = None
unsorted_indices = None
batch_sizes = None
x = self._rearrange_batch_dim(x)
_T, B, _D = x.shape
L = self.num_layers
P = 2 if self.bidirectional else 1
H = self.hidden_size
h_0s, c_0s = state_init or (None, None)
if h_0s is None:
h_0s = torch.zeros(L, P, B, self.hidden_size, dtype=x[0].dtype,
device=x[0].device)
else:
h_0s = h_0s.reshape([L, P, B, H])
h_0s = self._permute_hidden(h_0s, sorted_indices, 2)
if c_0s is None:
c_0s = torch.zeros(L, P, B, self.hidden_size, dtype=x[0].dtype,
device=x[0].device)
else:
c_0s = c_0s.reshape([L, P, B, H])
c_0s = self._permute_hidden(c_0s, sorted_indices, 2)
hs: 'List[torch.Tensor]' = []
cs: 'List[torch.Tensor]' = []
for layer, h0, c0 in zip(self.layers, h_0s, c_0s):
if not self.bidirectional:
h0 = h0.squeeze(0)
c0 = c0.squeeze(0)
x, (h, c) = layer(x, (h0, c0), batch_sizes)
if not self.bidirectional:
h = h.unsqueeze(0)
c = c.unsqueeze(0)
hs.append(h)
cs.append(c)
hs = torch.cat(hs, dim=0)
cs = torch.cat(cs, dim=0)
if batch_sizes is not None:
seq_lengths = _compute_seq_lengths(batch_sizes)
packed_data = pack_padded_sequence(pad_sequence(x, batch_first=
False), seq_lengths, batch_first=True)[0]
out = PackedSequence(packed_data, batch_sizes, sorted_indices,
unsorted_indices)
else:
out = self._rearrange_batch_dim(x)
return out, (self._permute_hidden(hs, unsorted_indices), self.
_permute_hidden(cs, unsorted_indices))
def _permute_hidden(self, x: 'torch.Tensor', permutation:
'Optional[torch.Tensor]'=None, dim: 'int'=1) ->torch.Tensor:
if permutation is None:
return x
if dim == 1:
return x[:, permutation, :]
elif dim == 2:
return x[:, :, permutation, :]
def _rearrange_batch_dim(self, x: 'torch.Tensor') ->torch.Tensor:
if self.batch_first:
x = x.transpose(0, 1)
return x
def __repr__(self):
s = f'DPLSTM({self.input_size}, {self.hidden_size}, bias={self.bias}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
if self.num_layers > 1:
s += f', num_layers={self.num_layers}'
if self.dropout:
s += f', dropout={self.dropout}'
if self.bidirectional:
s += f', bidirectional={self.bidirectional}'
return s
def _make_rename_dict(self, num_layers, bias, bidirectional):
"""
Programmatically constructs a dictionary old_name -> new_name to align with the param
names used in ``torch.nn.LSTM``.
"""
d = {}
components = ['weight'] + (['bias'] if bias else [])
matrices = ['ih', 'hh']
for i in range(num_layers):
for c in components:
for m in matrices:
nn_name = f'{c}_{m}_l{i}'
if bidirectional:
d[f'layers.{i}.forward_layer.cell.{m}.{c}'] = nn_name
d[f'layers.{i}.reverse_layer.cell.{m}.{c}'
] = nn_name + '_reverse'
else:
d[f'layers.{i}.cell.{m}.{c}'] = nn_name
return d
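# Hedged example of the resulting mapping (illustration only): for
# num_layers=1, bias=True, bidirectional=False the dict aligns the
# submodule parameter names with nn.LSTM's flat names.
def _example_make_rename_dict():
    d = DPLSTM(4, 4)._make_rename_dict(num_layers=1, bias=True,
        bidirectional=False)
    assert d == {'layers.0.cell.ih.weight': 'weight_ih_l0',
        'layers.0.cell.hh.weight': 'weight_hh_l0',
        'layers.0.cell.ih.bias': 'bias_ih_l0',
        'layers.0.cell.hh.bias': 'bias_hh_l0'}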
class DPSLTMAdapter(nn.Module):
"""
Adapter for DPLSTM.
LSTM returns a tuple, but our testing tools need the model to return a single tensor as output.
We do this adaptation here.
"""
def __init__(self, *args, **kwargs):
super().__init__()
self.dplstm = DPLSTM(*args, **kwargs)
def forward(self, x):
out, _rest = self.dplstm(x)
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
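# Hedged end-to-end sketch (not part of the original file): wiring the two
# helpers above into a forward pass over a [T=4, B=4, D=4] input.
def _example_adapter_forward():
    model = DPSLTMAdapter(*get_init_inputs()[0], **get_init_inputs()[1])
    out = model(*get_inputs())
    assert out.shape == (4, 4, 4)  # [T, B, H] with batch_first=False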
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import Tensor
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from typing import Optional
from typing import Union
from typing import List
from typing import Tuple
from torch.nn.utils.rnn import PackedSequence
from torch.nn.utils.rnn import pack_padded_sequence
from typing import Dict
from torch.nn.modules.module import _IncompatibleKeys
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2,
out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp25 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp28 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp16 + tmp17
tmp21 = tmp19 + tmp20
tmp22 = tmp18 + tmp21
tmp23 = libdevice.tanh(tmp22)
tmp26 = tmp24 + tmp25
tmp29 = tmp27 + tmp28
tmp30 = tmp26 + tmp29
tmp31 = tl.sigmoid(tmp30)
tmp33 = tmp31 * tmp32
tmp34 = tmp7 * tmp23
tmp35 = tmp33 + tmp34
tmp36 = 1.0
tmp37 = tmp36 - tmp31
tmp38 = tmp31 * tmp37
tmp39 = libdevice.tanh(tmp35)
tmp40 = tmp15 * tmp39
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
tl.store(out_ptr2 + x2, tmp23, xmask)
tl.store(out_ptr3 + x2, tmp35, xmask)
tl.store(out_ptr4 + x2, tmp38, xmask)
tl.store(out_ptr5 + x2, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4,
out_ptr5, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp17 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp20 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp25 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp28 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp16 + tmp17
tmp21 = tmp19 + tmp20
tmp22 = tmp18 + tmp21
tmp23 = tl.sigmoid(tmp22)
tmp26 = tmp24 + tmp25
tmp29 = tmp27 + tmp28
tmp30 = tmp26 + tmp29
tmp31 = libdevice.tanh(tmp30)
tmp33 = tmp15 * tmp32
tmp34 = tmp7 * tmp31
tmp35 = tmp33 + tmp34
tmp36 = libdevice.tanh(tmp35)
tmp37 = tmp23 * tmp36
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
tl.store(out_ptr2 + x2, tmp23, xmask)
tl.store(out_ptr3 + x2, tmp31, xmask)
tl.store(out_ptr4 + x2, tmp35, xmask)
tl.store(out_ptr5 + x2, tmp37, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp17 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp20 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tmp10 = tmp8 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = tl.sigmoid(tmp14)
tmp18 = tmp16 + tmp17
tmp21 = tmp19 + tmp20
tmp22 = tmp18 + tmp21
tmp23 = libdevice.tanh(tmp22)
tmp25 = tmp15 * tmp24
tmp26 = tmp7 * tmp23
tmp27 = tmp25 + tmp26
tmp28 = libdevice.tanh(tmp27)
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp15, xmask)
tl.store(out_ptr2 + x2, tmp23, xmask)
tl.store(out_ptr3 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_sigmoid_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.sigmoid(tmp6)
tl.store(out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp19 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp20 = tl.load(in_ptr4 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0)
tmp21 = tmp19 * tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp16, tmp21, tmp22)
tmp24 = tl.where(tmp14, tmp15, tmp23)
tmp25 = tl.where(tmp9, tmp10, tmp24)
tmp26 = tl.where(tmp4, tmp5, tmp25)
tl.store(out_ptr0 + x2, tmp26, xmask)
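# Hedged note (added commentary, not inductor output): the stack kernel
# above concatenates the four per-timestep hidden states h_1..h_4 into one
# [16, 4] buffer; the last quarter is computed inline as o_t * tanh(c_t)
# from in_ptr3/in_ptr4 rather than read from a precomputed h_4 buffer.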
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 4, 4), (16, 1, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1[grid(16)](buf1
, primals_3, buf2, primals_5, buf0, buf3, buf5, buf4, buf6,
buf32, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf8 = buf2
del buf2
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf8)
buf9 = buf1
del buf1
extern_kernels.mm(buf7, reinterpret_tensor(primals_4, (4, 16), (1,
4), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf8, primals_3,
buf9, primals_5, buf6, buf10, buf11, buf13, buf12, buf14, buf15,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = buf9
del buf9
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf16)
buf17 = buf8
del buf8
extern_kernels.mm(buf15, reinterpret_tensor(primals_4, (4, 16), (1,
4), 0), out=buf17)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_tanh_2[grid(16)](buf16, primals_3,
buf17, primals_5, buf14, buf18, buf19, buf21, buf20, buf22,
buf23, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf24 = buf17
del buf17
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48),
reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf24)
del primals_2
buf25 = buf16
del buf16
extern_kernels.mm(buf23, reinterpret_tensor(primals_4, (4, 16), (1,
4), 0), out=buf25)
buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_tanh_3[grid(16)](buf24, primals_3,
buf25, primals_5, buf22, buf26, buf27, buf28, buf30, 16, XBLOCK
=16, num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_sigmoid_4[grid(16)](buf24, primals_3, buf25,
primals_5, buf29, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf24
del primals_3
del primals_5
buf31 = reinterpret_tensor(buf25, (16, 4), (4, 1), 0)
del buf25
triton_poi_fused_stack_5[grid(64)](buf7, buf15, buf23, buf29, buf30,
buf31, 64, XBLOCK=64, num_warps=1, num_stages=1)
return (reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0),
reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(
primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4),
(4, 1), 16), reinterpret_tensor(primals_1, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), buf3, buf4, buf5,
buf6, buf7, buf10, buf11, buf12, buf13, buf14, buf15, buf18, buf19,
buf20, buf21, buf22, buf23, buf26, buf27, buf28, buf29, buf30,
primals_4, buf32)
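# Hedged note (added commentary): call() unrolls the T=4 recurrence of the
# DPLSTM below. Each timestep issues two extern mm's (x_t @ W_ih^T and
# h_{t-1} @ W_hh^T) followed by fused elementwise gate kernels, and the
# final stack kernel assembles the [4, 4, 4] output returned first.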
def _compute_last_states(h_n: 'List[torch.Tensor]', c_n:
'List[torch.Tensor]', seq_lengths: 'List[int]') ->Tuple[torch.Tensor,
torch.Tensor]:
"""
Given h and c values of all time steps, this function computes the h and c values for each sequence at their last timestep (this can vary across sequences with different sequence lengths).
Args:
h_n: A list of hidden state values across all timesteps.
c_n: A list of cell state values across all timesteps.
seq_lengths: the length parameter used in the torch.nn.utils.rnn.pack_padded_sequence function to create a PackedSequence. This can be computed using the _compute_seq_lengths function.
Returns:
h_last: Contains the last hidden state values for each of the sequences.
If the i'th sequence has a length of l_i, then h_last[i,:] contains the hidden state corresponding to the i'th sequence at timestep l_i.
c_last: The structure is the same as h_last, except that it contains the last cell state values for each of the sequences.
"""
max_batch_size = len(seq_lengths)
hidden_size = h_n[0].shape[-1]
h_last = torch.zeros(max_batch_size, hidden_size)
c_last = torch.zeros(max_batch_size, hidden_size)
for i, seq_len in enumerate(seq_lengths):
h_last[i, :] = h_n[seq_len - 1][i, :]
c_last[i, :] = c_n[seq_len - 1][i, :]
return h_last, c_last
def _compute_seq_lengths(batch_sizes: 'torch.Tensor') ->List[int]:
"""
Computes the sequence lengths (the length parameter used in the pack_padded_sequence function to create a PackedSequence).
Args:
batch_sizes: Contains the batch sizes as stored in a PackedSequence
Returns:
running_seq_lengths: the length parameter used in the torch.nn.utils.rnn.pack_padded_sequence function to create a PackedSequence.
It's a list with one entry per sequence (its length equals batch_sizes[0], the number of sequences).
"""
max_batch_size = batch_sizes[0]
if len(batch_sizes) == 1:
return [1] * max_batch_size
running_seq = 0
running_seq_lengths = []
for i in range(1, len(batch_sizes)):
delta = batch_sizes[i - 1].item() - batch_sizes[i].item()
running_seq += 1
running_seq_lengths += delta * [running_seq]
running_seq += 1
running_seq_lengths += batch_sizes[-1].item() * [running_seq]
running_seq_lengths.reverse()
return running_seq_lengths
def _concat_sequence_directions(forward:
'Union[List[torch.Tensor], Tuple[torch.Tensor]]', reverse:
'Union[List[torch.Tensor], Tuple[torch.Tensor]]', dim: 'int') ->Tuple[torch
.Tensor]:
"""
Given two lists/tuples of the same length containing tensors, this function returns their concatenation along dimension dim. So, output[i] is the concatenation of forward[i] and reverse[i] along dimension dim.
forward[i] and reverse[i] should have the same shape. This function is used for concatenating the outputs of the forward and reverse layers of a bidirectional LSTM.
Args:
forward: list/tuple containing n tensors, representing the output of the forward layer.
reverse: list/tuple containing n tensors, representing the output of the backward layer.
dim: the dimension along which the sequence of tensors within forward and reverse will be concatenated.
Returns:
output: list/tuple containing n concatenated tensors.
"""
if len(forward) != len(reverse):
raise ValueError(
'The forward and reverse layer output sequences should have the same length'
)
seq_length = len(forward)
output = [0] * seq_length
for i in range(seq_length):
output[i] = torch.cat((forward[i], reverse[i]), dim=dim)
return output
def filter_out_old_keys(self, state_dict, prefix, local_metadata):
new_state_dict = {param_name: param_value for param_name, param_value in
state_dict.items() if param_name not in self.old_to_new}
return new_state_dict
class LSTMLinear(nn.Linear):
"""
This class is the same as an nn.Linear layer, except that in the backward pass
the grad_samples get accumulated (instead of being concatenated as in the standard
nn.Linear)
"""
def __init__(self, in_features: 'int', out_features: 'int', bias:
'bool'=True):
super().__init__(in_features, out_features, bias)
class DPLSTMCell(nn.Module):
"""
Internal-only class. Implements *one* step of LSTM so that a LSTM layer can be seen as repeated
applications of this class.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool'):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.ih = LSTMLinear(input_size, 4 * hidden_size, bias=self.bias)
self.hh = LSTMLinear(hidden_size, 4 * hidden_size, bias=self.bias)
self.reset_parameters()
def reset_parameters(self):
"""
Resets parameters by initializing them from a uniform distribution.
"""
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
nn.init.uniform_(weight, -stdv, stdv)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
"""
Sets max batch length
"""
self.ih.max_batch_len = max_batch_length
self.hh.max_batch_len = max_batch_length
def forward(self, x: 'torch.Tensor', h_prev: 'torch.Tensor', c_prev:
'torch.Tensor', batch_size_t: 'Optional[int]'=None) ->Tuple[torch.
Tensor, torch.Tensor]:
if batch_size_t is None:
gates = self.ih(x) + self.hh(h_prev)
else:
gates = self.ih(x) + self.hh(h_prev[:batch_size_t, :])
i_t_input, f_t_input, g_t_input, o_t_input = torch.split(gates,
self.hidden_size, 1)
i_t = torch.sigmoid(i_t_input)
f_t = torch.sigmoid(f_t_input)
g_t = torch.tanh(g_t_input)
o_t = torch.sigmoid(o_t_input)
if batch_size_t is None:
c_t = f_t * c_prev + i_t * g_t
else:
c_t = f_t * c_prev[:batch_size_t, :] + i_t * g_t
h_t = o_t * torch.tanh(c_t)
return h_t, c_t
class DPLSTMLayer(nn.Module):
"""
Implements *one* layer of LSTM in a way amenable to differential privacy.
We don't expect you to use this directly: use DPLSTM instead :)
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
dropout: 'float', reverse: 'bool'=False):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.dropout = dropout
self.reverse = reverse
self.cell = DPLSTMCell(input_size=input_size, hidden_size=
hidden_size, bias=bias)
self.dropout_layer = nn.Dropout(dropout) if dropout > 0 else None
def set_max_batch_length(self, max_batch_length: 'int') ->None:
"""
Sets max batch length. Useful for PackedSequences
"""
self.cell.set_max_batch_length(max_batch_length)
def forward(self, x: 'Union[torch.Tensor, Tuple]', state_init:
'Tuple[torch.Tensor, torch.Tensor]', batch_sizes:
'Optional[torch.Tensor]'=None) ->Tuple[torch.Tensor, Tuple[torch.
Tensor, torch.Tensor]]:
"""
Implements the forward pass of the DPLSTMLayer when a sequence is given as input.
Args:
x: Input sequence to the DPLSTMCell of shape ``[T, B, D]``.
state_init: Initial state of the LSTMCell as a tuple ``(h_0, c_0)``
where ``h_0`` is the initial hidden state and ``c_0`` is the
initial cell state of the DPLSTMCell
batch_sizes: Contains the batch sizes as stored in PackedSequence
Returns:
``output, (h_n, c_n)`` where ``output`` is of shape ``[T, B, H]`` and is a
tensor containing the output features (``h_t``) from the last layer of the
DPLSTMCell for each timestep ``t``. ``h_n`` is of shape ``[B, H]`` and is a
tensor containing the hidden state for ``t = T``. ``c_n`` is of shape ``[B, H]``
and is a tensor containing the cell state for ``t = T``.
"""
if batch_sizes is not None:
seq_length = batch_sizes.size(0)
if self.reverse:
x = tuple(reversed(x))
batch_sizes = batch_sizes.flip(0)
else:
seq_length, _batch_sz, _ = x.shape
if self.reverse:
x = x.flip(0)
x = torch.unbind(x, dim=0)
h_0, c_0 = state_init
h_n = [h_0]
c_n = [c_0]
batch_size_prev = h_0.shape[0]
for t in range(seq_length):
if batch_sizes is not None:
batch_size_t = batch_sizes[t].item()
delta = batch_size_t - batch_size_prev
if delta > 0:
h_cat = torch.cat((h_n[t], h_0[batch_size_prev:
batch_size_t, :]), 0)
c_cat = torch.cat((c_n[t], c_0[batch_size_prev:
batch_size_t, :]), 0)
h_next, c_next = self.cell(x[t], h_cat, c_cat, batch_size_t
)
else:
h_next, c_next = self.cell(x[t], h_n[t], c_n[t],
batch_size_t)
else:
h_next, c_next = self.cell(x[t], h_n[t], c_n[t])
if self.dropout:
h_next = self.dropout_layer(h_next)
h_n.append(h_next)
c_n.append(c_next)
batch_size_prev = h_next.shape[0]
if batch_sizes is None:
h_n = torch.stack(h_n[1:], dim=0)
return h_n.flip(0) if self.reverse else h_n, (h_n[-1], c_n[-1])
else:
seq_lengths = _compute_seq_lengths(batch_sizes)
h_temp, c_temp = h_n[1:], c_n[1:]
h_last, c_last = _compute_last_states(h_temp, c_temp, seq_lengths)
if self.reverse:
h_temp = tuple(reversed(h_temp))
return h_temp, (h_last, c_last)
class BidirectionalDPLSTMLayer(nn.Module):
"""
Implements *one* layer of Bidirectional LSTM in a way amenable to differential privacy.
We don't expect you to use this directly: use DPLSTM instead :)
"""
def __init__(self, input_size: 'int', hidden_size: 'int', bias: 'bool',
dropout: 'float'):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bias = bias
self.dropout = dropout
self.forward_layer = DPLSTMLayer(input_size=input_size, hidden_size
=hidden_size, bias=bias, dropout=dropout, reverse=False)
self.reverse_layer = DPLSTMLayer(input_size=input_size, hidden_size
=hidden_size, bias=bias, dropout=dropout, reverse=True)
def set_max_batch_length(self, max_batch_length: 'int') ->None:
"""
Sets max batch length
"""
self.forward_layer.set_max_batch_length(max_batch_length)
self.reverse_layer.set_max_batch_length(max_batch_length)
def forward(self, x: 'torch.Tensor', state_init:
'Tuple[torch.Tensor, torch.Tensor]', batch_sizes:
'Optional[torch.Tensor]'=None) ->Tuple[torch.Tensor, Tuple[torch.
Tensor, torch.Tensor]]:
"""
Implements the forward pass of the DPLSTM when a sequence is input.
Dimensions as follows:
- B: Batch size
- T: Sequence length
- D: LSTM input hidden size (eg from a word embedding)
- H: LSTM output hidden size
- P: number of directions (2 if bidirectional, else 1)
Args:
x: Input sequence to the DPLSTM of shape ``[T, B, D]``
state_init: Initial state of the LSTM as a tuple ``(h_0, c_0)``, where
``h_0`` of shape ``[P, B, H]`` contains the initial hidden state, and
``c_0`` of shape ``[P, B, H]`` contains the initial cell state. This
argument can be (and defaults to) None, in which case zero tensors
will be used.
Returns:
``output, (h_n, c_n)`` where ``output`` is of shape ``[T, B, H * P]`` and is a
tensor containing the output features (``h_t``) from the last layer of the
DPLSTM for each timestep ``t``. ``h_n`` is of shape ``[P, B, H]`` and contains
the hidden state for ``t = T``. ``c_n`` is of shape ``[P, B, H]`` and contains
the cell state for ``t = T``.
"""
h0, c0 = state_init
h0_f, h0_r = h0.unbind(0)
c0_f, c0_r = c0.unbind(0)
out_f, (h_f, c_f) = self.forward_layer(x, (h0_f, c0_f), batch_sizes)
out_r, (h_r, c_r) = self.reverse_layer(x, (h0_r, c0_r), batch_sizes)
if batch_sizes is None:
out = torch.cat([out_f, out_r], dim=-1)
else:
out = _concat_sequence_directions(out_f, out_r, -1)
h = torch.stack([h_f, h_r], dim=0)
c = torch.stack([c_f, c_r], dim=0)
return out, (h, c)
class ParamRenamedModule(nn.Module):
"""
This class defines a nn.Module whose parameters are renamed. This is useful when you want to
reimplement a layer but make sure its state_dict and list of parameters are exactly the same
as another reference layer so that you can have a drop-in replacement that does not depend on
how your layer is actually implemented. In Opacus, this is used for DPLSTM, where our
implementation leverages submodules and requires alignment to the state_dict of nn.LSTM.
"""
def __init__(self, rename_map: 'Dict[str, str]'):
"""
Initializes internal state. Subclass this instead of ``torch.nn.Module`` whenever you need
to rename your model's state.
Args:
rename_map: mapping from old name -> new name for each parameter you want renamed.
Note that this must be a 1:1 mapping!
"""
super().__init__()
self.old_to_new = rename_map
self.new_to_old = {v: k for k, v in rename_map.items()}
self._register_state_dict_hook(filter_out_old_keys)
def _register_renamed_parameters(self):
"""
Internal function. This function simply registers parameters under their new name. They will
automatically mask their duplicates coming from submodules. This trick works because
self.parameters() proceeds recursively from the top, going into submodules after processing
items at the current level, and will not return duplicates.
"""
for old_name, param in super().named_parameters():
if old_name in self.old_to_new:
new_name = self.old_to_new[old_name]
self.register_parameter(new_name, param)
def __setattr__(self, name: 'str', value: 'Union[Tensor, nn.Module]'
) ->None:
"""
Whenever you set an attribute, eg `self.linear`, this is called to actually register it in
any nn.Module. We rely on the masking trick explained in the docs for
``_register_renamed_parameters`` to make sure we replace things only once. If a new parameter
in the rename list is detected, we rename and mask it so next time this is called we will
no longer find it.
"""
super().__setattr__(name, value)
try:
self._register_renamed_parameters()
except AttributeError:
pass
def load_state_dict(self, state_dict: 'Dict[str, Tensor]', strict:
'bool'=True):
"""
Identical to ``torch.nn.Module.load_state_dict()`` but handles the renamed keys.
"""
missing_keys, unexpected_keys = super().load_state_dict(state_dict,
strict=False)
missing_keys = [k for k in missing_keys if k not in self.old_to_new]
if strict:
error_msgs = []
if len(unexpected_keys) > 0:
error_msgs.insert(0,
'Unexpected key(s) in state_dict: {}. '.format(', '.
join('"{}"'.format(k) for k in unexpected_keys)))
if len(missing_keys) > 0:
error_msgs.insert(0, 'Missing key(s) in state_dict: {}. '.
format(', '.join('"{}"'.format(k) for k in missing_keys)))
if len(error_msgs) > 0:
raise RuntimeError(
'Error(s) in loading state_dict for {}:\n\t{}'.format(
self.__class__.__name__, '\n\t'.join(error_msgs)))
return _IncompatibleKeys(missing_keys, unexpected_keys)
class DPLSTM(ParamRenamedModule):
"""
DP-friendly drop-in replacement of the ``torch.nn.LSTM`` module.
Its state_dict matches that of nn.LSTM exactly, so that after training it can be exported
and loaded by an nn.LSTM for inference.
Refer to nn.LSTM's documentation for all parameters and inputs.
"""
def __init__(self, input_size: 'int', hidden_size: 'int', num_layers:
'int'=1, bias: 'bool'=True, batch_first: 'bool'=False, dropout:
'float'=0, bidirectional: 'bool'=False):
rename_dict = self._make_rename_dict(num_layers, bias, bidirectional)
super().__init__(rename_dict)
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bias = bias
self.batch_first = batch_first
self.dropout = dropout
self.bidirectional = bidirectional
self.num_directions = 2 if self.bidirectional else 1
LayerClass = BidirectionalDPLSTMLayer if bidirectional else DPLSTMLayer
self.layers = nn.ModuleList([LayerClass(input_size=self.input_size if
i == 0 else self.hidden_size * self.num_directions, hidden_size
=self.hidden_size, bias=self.bias, dropout=self.dropout if i <
self.num_layers - 1 else 0) for i in range(num_layers)])
def forward(self, x: 'Union[torch.Tensor, PackedSequence]', state_init:
'Optional[Tuple[torch.Tensor, torch.Tensor]]'=None) ->Tuple[torch.
Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
Implements the forward pass of the DPLSTM when a sequence is input.
Dimensions as follows:
- B: Batch size
- T: Sequence length
- D: LSTM input hidden size (eg from a word embedding)
- H: LSTM output hidden size
- L: number of layers in the LSTM
- P: number of directions (2 if bidirectional, else 1)
Args:
x: Input sequence to the DPLSTM of shape ``[T, B, D]``. Or it can be a PackedSequence.
state_init: Initial state of the LSTM as a tuple ``(h_0, c_0)``, where:
- ``h_0`` of shape ``[L*P, B, H]`` contains the initial hidden state
- ``c_0`` of shape ``[L*P, B, H]`` contains the initial cell state
This argument can be (and defaults to) None, in which case zero tensors will be used.
Returns:
``output, (h_n, c_n)`` where ``output`` is of shape ``[T, B, H * P]`` and is a
tensor containing the output features (``h_t``) from the last layer of the DPLSTM
for each timestep ``t``. ``h_n`` is of shape ``[L * P, B, H]`` and contains the
hidden state for ``t = T``. ``c_n`` is of shape ``[L * P, B, H]`` and contains
the cell state for ``t = T``.
"""
if isinstance(x, PackedSequence):
x, batch_sizes, sorted_indices, unsorted_indices = x
B = batch_sizes[0].item()
_, _D = x.shape
x = x.split(tuple(batch_sizes))
for layer in self.layers:
layer.set_max_batch_length(B)
else:
sorted_indices = None
unsorted_indices = None
batch_sizes = None
x = self._rearrange_batch_dim(x)
_T, B, _D = x.shape
L = self.num_layers
P = 2 if self.bidirectional else 1
H = self.hidden_size
h_0s, c_0s = state_init or (None, None)
if h_0s is None:
h_0s = torch.zeros(L, P, B, self.hidden_size, dtype=x[0].dtype,
device=x[0].device)
else:
h_0s = h_0s.reshape([L, P, B, H])
h_0s = self._permute_hidden(h_0s, sorted_indices, 2)
if c_0s is None:
c_0s = torch.zeros(L, P, B, self.hidden_size, dtype=x[0].dtype,
device=x[0].device)
else:
c_0s = c_0s.reshape([L, P, B, H])
c_0s = self._permute_hidden(c_0s, sorted_indices, 2)
hs: 'List[torch.Tensor]' = []
cs: 'List[torch.Tensor]' = []
for layer, h0, c0 in zip(self.layers, h_0s, c_0s):
if not self.bidirectional:
h0 = h0.squeeze(0)
c0 = c0.squeeze(0)
x, (h, c) = layer(x, (h0, c0), batch_sizes)
if not self.bidirectional:
h = h.unsqueeze(0)
c = c.unsqueeze(0)
hs.append(h)
cs.append(c)
hs = torch.cat(hs, dim=0)
cs = torch.cat(cs, dim=0)
if batch_sizes is not None:
seq_lengths = _compute_seq_lengths(batch_sizes)
packed_data = pack_padded_sequence(pad_sequence(x, batch_first=
False), seq_lengths, batch_first=True)[0]
out = PackedSequence(packed_data, batch_sizes, sorted_indices,
unsorted_indices)
else:
out = self._rearrange_batch_dim(x)
return out, (self._permute_hidden(hs, unsorted_indices), self.
_permute_hidden(cs, unsorted_indices))
def _permute_hidden(self, x: 'torch.Tensor', permutation:
'Optional[torch.Tensor]'=None, dim: 'int'=1) ->torch.Tensor:
if permutation is None:
return x
if dim == 1:
return x[:, permutation, :]
elif dim == 2:
return x[:, :, permutation, :]
def _rearrange_batch_dim(self, x: 'torch.Tensor') ->torch.Tensor:
if self.batch_first:
x = x.transpose(0, 1)
return x
def __repr__(self):
s = f'DPLSTM({self.input_size}, {self.hidden_size}, bias={self.bias}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
if self.num_layers > 1:
s += f', num_layers={self.num_layers}'
if self.dropout:
s += f', dropout={self.dropout}'
if self.bidirectional:
s += f', bidirectional={self.bidirectional}'
return s
def _make_rename_dict(self, num_layers, bias, bidirectional):
"""
Programmatically constructs a dictionary old_name -> new_name to align with the param
names used in ``torch.nn.LSTM``.
"""
d = {}
components = ['weight'] + (['bias'] if bias else [])
matrices = ['ih', 'hh']
for i in range(num_layers):
for c in components:
for m in matrices:
nn_name = f'{c}_{m}_l{i}'
if bidirectional:
d[f'layers.{i}.forward_layer.cell.{m}.{c}'] = nn_name
d[f'layers.{i}.reverse_layer.cell.{m}.{c}'
] = nn_name + '_reverse'
else:
d[f'layers.{i}.cell.{m}.{c}'] = nn_name
return d
class DPSLTMAdapterNew(nn.Module):
"""
Adapter for DPLSTM.
LSTM returns a tuple, but our testing tools need the model to return a single tensor as output.
We do this adaptation here.
"""
def __init__(self, *args, **kwargs):
super().__init__()
self.dplstm = DPLSTM(*args, **kwargs)
def forward(self, input_0):
primals_2 = self.dplstm.weight_ih_l0
primals_3 = self.dplstm.bias_ih_l0
primals_4 = self.dplstm.weight_hh_l0
primals_5 = self.dplstm.bias_hh_l0
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
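# Hedged usage sketch (illustrative; assumes a CUDA device is available):
# the compiled adapter is exercised like the eager DPSLTMAdapter, and only
# output[0] (the stacked hidden states) is returned to the caller.
def _example_compiled_adapter():
    model = DPSLTMAdapterNew(input_size=4, hidden_size=4).cuda()
    out = model(torch.rand(4, 4, 4, device='cuda'))
    assert out.shape == (4, 4, 4)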
| ffuuugor/opacus | DPSLTMAdapter | false | 6,726 | [
"Apache-2.0"
] | 1 | 2048a6e92902685c2a735e9fb7c0d48b4846b494 | https://github.com/ffuuugor/opacus/tree/2048a6e92902685c2a735e9fb7c0d48b4846b494 | import math
import torch
from torch import Tensor
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from typing import Optional
from typing import Union
from typing import List
from typing import Tuple
from torch.nn.utils.rnn import PackedSequence
from torch.nn.utils.rnn import pack_padded_sequence
from typing import Dict
from torch.nn.modules.module import _IncompatibleKeys
def _compute_last_states(h_n: 'List[torch.Tensor]', c_n:
'List[torch.Tensor]', seq_lengths: 'List[int]') ->Tuple[torch.Tensor,
torch.Tensor]:
"""
Given h and c values of all time steps, this function computes the h and c values for each sequence at their last timestep (this can vary across sequences with different sequence lengths).
Args:
h_n: A list of hidden state values across all timesteps.
c_n: A list of cell state values across all timesteps.
seq_lengths: the length parameter used in the torch.nn.utils.rnn.pack_padded_sequence function to create a PackedSequence. This can be computed using the _compute_seq_lengths function.
Returns:
h_last: Contains the last hidden state values for each of the sequences.
If the i'th sequence has a length of l_i, then h_last[i,:] contains the hidden state corresponding to the i'th sequence at timestep l_i.
c_last: The structure is the same as h_last, except that it contains the last cell state values for each of the sequences.
"""
max_batch_size = len(seq_lengths)
hidden_size = h_n[0].shape[-1]
h_last = torch.zeros(max_batch_size, hidden_size)
c_last = torch.zeros(max_batch_size, hidden_size)
for i, seq_len in enumerate(seq_lengths):
h_last[i, :] = h_n[seq_len - 1][i, :]
c_last[i, :] = c_n[seq_len - 1][i, :]
return h_last, c_last
def _compute_seq_lengths(batch_sizes: 'torch.Tensor') ->List[int]:
"""
Computes the sequence lengths (the length parameter used in the pack_padded_sequence function to create a PackedSequence).
Args:
batch_sizes: Contains the batch sizes as stored in a PackedSequence
Returns:
running_seq_lengths: the length parameter used in the torch.nn.utils.rnn.pack_padded_sequence function to create a PackedSequence.
It's a list with one entry per sequence (its length equals batch_sizes[0], the number of sequences).
"""
max_batch_size = batch_sizes[0]
if len(batch_sizes) == 1:
return [1] * max_batch_size
running_seq = 0
running_seq_lengths = []
for i in range(1, len(batch_sizes)):
delta = batch_sizes[i - 1].item() - batch_sizes[i].item()
running_seq += 1
running_seq_lengths += delta * [running_seq]
running_seq += 1
running_seq_lengths += batch_sizes[-1].item() * [running_seq]
running_seq_lengths.reverse()
return running_seq_lengths
def _concat_sequence_directions(forward:
'Union[List[torch.Tensor], Tuple[torch.Tensor]]', reverse:
'Union[List[torch.Tensor], Tuple[torch.Tensor]]', dim: 'int') ->Tuple[torch
.Tensor]:
"""
Given two lists/tuples of the same length containing tensors, this function returns their concatenation along dimension dim. So, output[i] is the concatenation of forward[i] and reverse[i] along dimension dim.
forward[i] and reverse[i] should have the same shape. This function is used for concatenating the outputs of the forward and reverse layers of a bidirectional LSTM.
Args:
forward: list/tuple containing n tensors, representing the output of the forward layer.
reverse: list/tuple containing n tensors, representing the output of the backward layer.
dim: the dimension along which the sequence of tensors within forward and reverse will be concatenated.
Returns:
output: list/tuple containing n concatenated tensors.
"""
if len(forward) != len(reverse):
raise ValueError(
'The forward and reverse layer output sequences should h
# ... truncated (>4000 chars) for memory efficiency |
Net16 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/v7/cv7zazascu4rpkkwoxbiwk6c2le2e6wshdhae73bmaoapelvwguv.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
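# i.e., the fused kernel below computes relu(x + bias) in place and stores the
# boolean (result <= 0) mask that threshold_backward consumes in the backward
# pass to zero out gradients where the ReLU was inactive.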
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 1024, grid=grid(1024), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Net16(nn.Module):
def __init__(self, input_dim, output_dim):
super(Net16, self).__init__()
self.linear1 = nn.Linear(input_dim, 16)
self.linear2 = nn.Linear(16, output_dim)
def forward(self, x):
x = F.relu(self.linear1(x))
x = self.linear2(x)
return x
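# Shape note: with the (4, 4, 4, 4) inputs below, nn.Linear applies to the
# last dimension only, so the compiled version flattens the input to (64, 4)
# and (64, 16) matmuls and reinterprets the result back to 4-D.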
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
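# Fused bias-add + ReLU kernel over the 1024 elements of the (4, 4, 4, 16)
# activation: in_out_ptr0 holds the matmul result, in_ptr0 the 16-element
# bias (broadcast via x0 = xindex % 16), and out_ptr0 receives the
# (activation <= 0) mask kept for the backward pass.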
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (16, 4), (4, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 16), (256, 64, 16, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1,
primals_2, buf3, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4, buf3
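# Equivalent eager computation (a sketch for reference; variable names are
# illustrative only, mapping w1/b1 to linear1 and w2/b2 to linear2):
#   h = torch.relu(x.view(64, 4) @ w1.t() + b1)   # buf1, mask stored in buf3
#   y = (h @ w2.t() + b2).view(4, 4, 4, 4)        # buf2, computed via addmm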
class Net16New(nn.Module):
def __init__(self, input_dim, output_dim):
super(Net16New, self).__init__()
self.linear1 = nn.Linear(input_dim, 16)
self.linear2 = nn.Linear(16, output_dim)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
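# Hypothetical usage (assumes a CUDA device; names as defined above):
#   model = Net16New(input_dim=4, output_dim=4).cuda()
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   y = model(x)  # (4, 4, 4, 4), matching the eager Net16 forward pass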
| gautam-sharma1/openRL | Net16 | false | 6,727 | [
"MIT"
] | 1 | 14310a97a328fe5682a01ee85d83a6b5e1ae29ca | https://github.com/gautam-sharma1/openRL/tree/14310a97a328fe5682a01ee85d83a6b5e1ae29ca | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input_dim, output_dim):
super().__init__()
self.linear1 = nn.Linear(input_dim, 16)
self.linear2 = nn.Linear(16, output_dim)
def forward(self, x):
x = F.relu(self.linear1(x))
x = self.linear2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|