entry_point (string, 1–65 chars) | original_triton_code (string, 4.5k–619k chars) | python_code (string, 208–60.9k chars) | triton_code (string, 1.15k–275k chars) | repo_name (string, 7–115 chars) | module_name (string, 1–65 chars) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (sequence, length 1–6) | stars (int64, 0–19.8k) | sha (string, 40 chars) | repo_link (string, 72–180 chars) | pytorch_code (string, 200–4.05k chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
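Each row below pairs a PyTorch `nn.Module` with the Triton code that Inductor generated for it, plus repository metadata. A minimal loading sketch follows; the Hub repo id is a placeholder, not the dataset's real id.

from datasets import load_dataset

# Hypothetical repo id -- substitute the actual dataset id when known.
ds = load_dataset("your-org/pytorch-triton-pairs", split="train")
row = ds[0]
print(row["module_name"], row["repo_name"], row["licenses"], row["stars"])
print(row["pytorch_code"][:200])  # reference PyTorch implementation
print(row["triton_code"][:200])   # cleaned Triton/Inductor translation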
ConvBlockFixup | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ny/cnyftzt44marcm3rjc5ttmk3yz7tgjftwetyjwwa4mdifhwaiq5l.py
# Topologically Sorted Source Nodes: [add, out], Original ATen: [aten.add, aten.constant_pad_nd]
# Source node to ATen node mapping:
# add => add
# out => constant_pad_nd
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%add, [0, 0, 0, 1]), kwargs = {})
triton_poi_fused_add_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_add_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_constant_pad_nd_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 5
x2 = (xindex // 20)
x3 = xindex % 20
x4 = xindex
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x3 + (16*x2)), tmp2 & xmask, other=0.0)
tmp6 = tmp3 + tmp5
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp2, tmp6, tmp7)
tl.store(out_ptr0 + (x4), tmp8, xmask)
''', device_str='cuda')
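# --- Added commentary (not part of the generated output) ---
# This kernel fuses the scalar bias add (x + bias1a) with
# aten.constant_pad_nd([0, 0, 0, 1]): the (4, 4, 4, 4) input becomes a
# (4, 4, 5, 4) buffer padded by one row along the height dimension, which is
# how Inductor lowers Conv2d(padding='same') with the even kernel height 4.
# x1 in [0, 5) indexes the padded height; positions with x1 < 4 load the
# input and add the broadcast bias, and the extra row is zero-filled by
# tl.where(tmp2, ..., 0.0).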
# kernel path: runs/run_shard_6/inductor_cache/2g/c2geersai56ajl5mszxuccvf32ingsf7uewltdfucegi7nprodd2.py
# Topologically Sorted Source Nodes: [add_1, out_1, add_2, out_2], Original ATen: [aten.add, aten.relu, aten.constant_pad_nd]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# out_1 => relu
# out_2 => constant_pad_nd_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %primals_5), kwargs = {})
# %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%add_2, [0, 0, 0, 1]), kwargs = {})
triton_poi_fused_add_constant_pad_nd_relu_1 = async_compile.triton('triton_poi_fused_add_constant_pad_nd_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_constant_pad_nd_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_constant_pad_nd_relu_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 5
x2 = (xindex // 20)
x3 = xindex % 20
x4 = xindex
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp9 = tl.load(in_ptr2 + (0))
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x3 + (16*x2)), tmp2 & xmask, other=0.0)
tmp6 = tmp3 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp11 = tmp8 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp2, tmp11, tmp12)
tl.store(out_ptr0 + (x4), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ru/cruzz4bw56op4a2umcojvqo7x3yllo524t6ildzprzx27ua6kcm5.py
# Topologically Sorted Source Nodes: [mul, out_3, out_4, out_5], Original ATen: [aten.mul, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# mul => mul
# out_3 => add_3
# out_4 => add_4
# out_5 => relu_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, %primals_7), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_8), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %primals_1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_mul_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_add_mul_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_relu_threshold_backward_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + (x0), xmask)
tmp3 = tmp0 * tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(out_ptr0 + (x0), tmp10, xmask)
tl.store(out_ptr1 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/cu/ccuuixkigxbxyhnijuiyewzzytgjirr5emo3575tqzbab4hum44n.py
# Topologically Sorted Source Nodes: [add_1, out_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# add_1 => add_1
# out_1 => relu
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_7, (1, ), (1, ))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, out], Original ATen: [aten.add, aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_add_constant_pad_nd_0.run(primals_1, primals_2, buf0, 320, grid=grid(320), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(1, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add_1, out_1, add_2, out_2], Original ATen: [aten.add, aten.relu, aten.constant_pad_nd]
triton_poi_fused_add_constant_pad_nd_relu_1.run(buf1, primals_4, primals_5, buf2, 320, grid=grid(320), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1), padding=(1, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [mul, out_3, out_4, out_5], Original ATen: [aten.mul, aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_mul_relu_threshold_backward_2.run(buf3, primals_7, primals_8, primals_1, buf4, buf5, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_8
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [add_1, out_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_relu_threshold_backward_3.run(buf1, primals_4, buf6, 256, grid=grid(256), stream=stream0)
del buf1
del primals_4
return (buf4, primals_3, primals_6, primals_7, buf0, buf2, buf3, buf5, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 1), (16, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class ConvBlockFixup(nn.Module):
def __init__(self, filter_width, input_filters, nb_filters, dilation):
super(ConvBlockFixup, self).__init__()
self.filter_width = filter_width
self.input_filters = input_filters
self.nb_filters = nb_filters
self.dilation = dilation
self.bias1a = nn.Parameter(torch.zeros(1))
self.conv1 = nn.Conv2d(self.input_filters, self.nb_filters, (self.
filter_width, 1), dilation=(self.dilation, 1), bias=False,
padding='same')
self.bias1b = nn.Parameter(torch.zeros(1))
self.relu = nn.ReLU(inplace=True)
self.bias2a = nn.Parameter(torch.zeros(1))
self.conv2 = nn.Conv2d(self.nb_filters, self.nb_filters, (self.
filter_width, 1), dilation=(self.dilation, 1), bias=False,
padding='same')
self.scale = nn.Parameter(torch.ones(1))
self.bias2b = nn.Parameter(torch.zeros(1))
def forward(self, x):
identity = x
out = self.conv1(x + self.bias1a)
out = self.relu(out + self.bias1b)
out = self.conv2(out + self.bias2a)
out = out * self.scale + self.bias2b
out += identity
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'filter_width': 4, 'input_filters': 4, 'nb_filters': 4,
'dilation': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_constant_pad_nd_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 5
x2 = xindex // 20
x3 = xindex % 20
x4 = xindex
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp2 & xmask, other=0.0)
tmp6 = tmp3 + tmp5
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp2, tmp6, tmp7)
tl.store(out_ptr0 + x4, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_constant_pad_nd_relu_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 5
x2 = xindex // 20
x3 = xindex % 20
x4 = xindex
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp9 = tl.load(in_ptr2 + 0)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK])
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x3 + 16 * x2), tmp2 & xmask, other=0.0)
tmp6 = tmp3 + tmp5
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp11 = tmp8 + tmp10
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp2, tmp11, tmp12)
tl.store(out_ptr0 + x4, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_mul_relu_threshold_backward_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + x0, xmask)
tmp3 = tmp0 * tmp2
tmp6 = tmp3 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = 0.0
tmp12 = tmp10 <= tmp11
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 1), (16, 4, 1, 1))
assert_size_stride(primals_7, (1,), (1,))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_constant_pad_nd_0[grid(320)](primals_1,
primals_2, buf0, 320, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 5, 4), (80, 20, 4, 1), torch.float32)
triton_poi_fused_add_constant_pad_nd_relu_1[grid(320)](buf1,
primals_4, primals_5, buf2, 320, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_5
buf3 = extern_kernels.convolution(buf2, primals_6, stride=(1, 1),
padding=(1, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_mul_relu_threshold_backward_2[grid(256)](buf3,
primals_7, primals_8, primals_1, buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
del primals_8
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_3[grid(256)](buf1,
primals_4, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_4
return buf4, primals_3, primals_6, primals_7, buf0, buf2, buf3, buf5, buf6
class ConvBlockFixupNew(nn.Module):
def __init__(self, filter_width, input_filters, nb_filters, dilation):
super(ConvBlockFixupNew, self).__init__()
self.filter_width = filter_width
self.input_filters = input_filters
self.nb_filters = nb_filters
self.dilation = dilation
self.bias1a = nn.Parameter(torch.zeros(1))
self.conv1 = nn.Conv2d(self.input_filters, self.nb_filters, (self.
filter_width, 1), dilation=(self.dilation, 1), bias=False,
padding='same')
self.bias1b = nn.Parameter(torch.zeros(1))
self.relu = nn.ReLU(inplace=True)
self.bias2a = nn.Parameter(torch.zeros(1))
self.conv2 = nn.Conv2d(self.nb_filters, self.nb_filters, (self.
filter_width, 1), dilation=(self.dilation, 1), bias=False,
padding='same')
self.scale = nn.Parameter(torch.ones(1))
self.bias2b = nn.Parameter(torch.zeros(1))
def forward(self, input_0):
primals_2 = self.bias1a
primals_4 = self.bias1b
primals_5 = self.bias2a
primals_7 = self.scale
primals_8 = self.bias2b
primals_3 = self.conv1.weight
primals_6 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| OmarNajdi/dl-for-har | ConvBlockFixup | false | 934 | [
"MIT"
] | 0 | 5e1b7c29caf2b41fcba106cd901c45d8f2d18429 | https://github.com/OmarNajdi/dl-for-har/tree/5e1b7c29caf2b41fcba106cd901c45d8f2d18429 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, filter_width, input_filters, nb_filters, dilation):
super().__init__()
self.filter_width = filter_width
self.input_filters = input_filters
self.nb_filters = nb_filters
self.dilation = dilation
self.bias1a = nn.Parameter(torch.zeros(1))
self.conv1 = nn.Conv2d(self.input_filters, self.nb_filters, (self.
filter_width, 1), dilation=(self.dilation, 1), bias=False,
padding='same')
self.bias1b = nn.Parameter(torch.zeros(1))
self.relu = nn.ReLU(inplace=True)
self.bias2a = nn.Parameter(torch.zeros(1))
self.conv2 = nn.Conv2d(self.nb_filters, self.nb_filters, (self.
filter_width, 1), dilation=(self.dilation, 1), bias=False,
padding='same')
self.scale = nn.Parameter(torch.ones(1))
self.bias2b = nn.Parameter(torch.zeros(1))
def forward(self, x):
identity = x
out = self.conv1(x + self.bias1a)
out = self.relu(out + self.bias1b)
out = self.conv2(out + self.bias2a)
out = out * self.scale + self.bias2b
out += identity
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'filter_width': 4, 'input_filters': 4, 'nb_filters': 4,
'dilation': 1}]
|
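A quick way to sanity-check this row is to run the reference module on its own `get_inputs()` data. The sketch below is an illustration only, not part of the dataset; it assumes the `ConvBlockFixup` class from the `pytorch_code` cell is already in scope.

import torch

torch.manual_seed(0)
block = ConvBlockFixup(filter_width=4, input_filters=4, nb_filters=4, dilation=1)
x = torch.rand(4, 4, 4, 4)
out = block(x)
# The residual connection plus 'same' padding preserve the input shape.
assert out.shape == x.shape
print(out.mean().item())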
MarginRankingLoss_learning_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/xj/cxjese7rxw7beqnmjkvtny3jlalffvsxa64emiazjfju5jgu7yhf.py
# Topologically Sorted Source Nodes: [sub, final_target, margin_ranking_loss], Original ATen: [aten.sub, aten.sign, aten.neg, aten.mul, aten.add, aten.clamp_min, aten.mean]
# Source node to ATen node mapping:
# final_target => sign
# margin_ranking_loss => add, clamp_min, mean, mul, neg, sub_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_4), kwargs = {})
# %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%sub,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sign,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_1, %slice_2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %sub_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%mul, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%clamp_min,), kwargs = {})
triton_per_fused_add_clamp_min_mean_mul_neg_sign_sub_0 = async_compile.triton('triton_per_fused_add_clamp_min_mean_mul_neg_sign_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 2],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_min_mean_mul_neg_sign_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_min_mean_mul_neg_sign_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 2
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp7 = tl.load(in_ptr0 + (2 + r0), None)
tmp22 = tl.load(in_ptr2 + (r0), None)
tmp23 = tl.load(in_ptr2 + (2 + r0), None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (tmp4), None, eviction_policy='evict_last')
tmp8 = tmp7 + tmp1
tmp9 = tmp7 < 0
tmp10 = tl.where(tmp9, tmp8, tmp7)
tl.device_assert((0 <= tmp10) & (tmp10 < 4), "index out of bounds: 0 <= tmp10 < 4")
tmp12 = tl.load(in_ptr1 + (tmp10), None, eviction_policy='evict_last')
tmp13 = tmp6 - tmp12
tmp14 = tl.full([1, 1], 0, tl.int32)
tmp15 = tmp14 < tmp13
tmp16 = tmp15.to(tl.int8)
tmp17 = tmp13 < tmp14
tmp18 = tmp17.to(tl.int8)
tmp19 = tmp16 - tmp18
tmp20 = tmp19.to(tmp13.dtype)
tmp21 = -tmp20
tmp24 = tmp22 - tmp23
tmp25 = tmp21 * tmp24
tmp26 = 0.5
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = triton_helpers.maximum(tmp27, tmp28)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp32 = tl.sum(tmp30, 1)[:, None]
tmp33 = 2.0
tmp34 = tmp32 / tmp33
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp34, None)
''', device_str='cuda')
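# --- Added commentary (not part of the generated output) ---
# This single persistent-reduction kernel inlines the whole loss:
#   mean(clamp(-sign(t_i - t_j) * (x_i - x_j) + 0.5, min=0))
# in_ptr0 holds the randperm indices (tmp2/tmp4 apply negative-index
# wraparound before the bounds assert), in_ptr1 the targets gathered through
# those indices, and in_ptr2 the unpermuted predictions. tmp16 - tmp18
# computes sign(d) as (0 < d) - (d < 0), and the final sum over RBLOCK=2 is
# divided by 2.0 for the mean reduction.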
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1), (1, 1))
assert_size_stride(arg1_1, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [random], Original ATen: [aten.randperm]
buf0 = torch.ops.aten.randperm.default(4, device=device(type='cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [sub, final_target, margin_ranking_loss], Original ATen: [aten.sub, aten.sign, aten.neg, aten.mul, aten.add, aten.clamp_min, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_min_mean_mul_neg_sign_sub_0.run(buf3, buf1, arg1_1, arg0_1, 1, 2, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del buf1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MarginRankingLoss_learning_loss(nn.Module):
"""
Ranking loss as described in the LPM paper.
Targets are randomly permuted, and the final target is a list of -1's and 1's:
1 means the item in the i list ranks higher, -1 means the item in the j list ranks higher.
This creates a pairwise ranking loss.
"""
def __init__(self, margin=0.5):
super(MarginRankingLoss_learning_loss, self).__init__()
self.margin = margin
def forward(self, inputs, targets):
random = torch.randperm(inputs.size(0))
mid = int(inputs.size(0) // 2)
pred_lossi = inputs[:mid]
pred_lossj = inputs[mid:]
target_loss = targets.reshape(inputs.size(0), 1)
target_loss = target_loss[random]
target_lossi = target_loss[:mid]
target_lossj = target_loss[mid:]
final_target = torch.sign(target_lossi - target_lossj)
return F.margin_ranking_loss(pred_lossi, pred_lossj, final_target,
margin=self.margin, reduction='mean')
def get_inputs():
return [torch.rand([4, 1]), torch.rand([4, 1])]
def get_init_inputs():
return [[], {}]
| import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_min_mean_mul_neg_sign_sub_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp7 = tl.load(in_ptr0 + (2 + r0), None)
tmp22 = tl.load(in_ptr2 + r0, None)
tmp23 = tl.load(in_ptr2 + (2 + r0), None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + tmp4, None, eviction_policy='evict_last')
tmp8 = tmp7 + tmp1
tmp9 = tmp7 < 0
tmp10 = tl.where(tmp9, tmp8, tmp7)
tl.device_assert((0 <= tmp10) & (tmp10 < 4),
'index out of bounds: 0 <= tmp10 < 4')
tmp12 = tl.load(in_ptr1 + tmp10, None, eviction_policy='evict_last')
tmp13 = tmp6 - tmp12
tmp14 = tl.full([1, 1], 0, tl.int32)
tmp15 = tmp14 < tmp13
tmp16 = tmp15.to(tl.int8)
tmp17 = tmp13 < tmp14
tmp18 = tmp17.to(tl.int8)
tmp19 = tmp16 - tmp18
tmp20 = tmp19.to(tmp13.dtype)
tmp21 = -tmp20
tmp24 = tmp22 - tmp23
tmp25 = tmp21 * tmp24
tmp26 = 0.5
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = triton_helpers.maximum(tmp27, tmp28)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp32 = tl.sum(tmp30, 1)[:, None]
tmp33 = 2.0
tmp34 = tmp32 / tmp33
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp34, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 1), (1, 1))
assert_size_stride(arg1_1, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.randperm.default(4, device=device(type='cuda',
index=0), pin_memory=False)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
get_raw_stream(0)
triton_per_fused_add_clamp_min_mean_mul_neg_sign_sub_0[grid(1)](buf3,
buf1, arg1_1, arg0_1, 1, 2, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del buf1
return buf3,
class MarginRankingLoss_learning_lossNew(nn.Module):
"""
Ranking loss as described in the LPM paper.
Targets are randomly permuted, and the final target is a list of -1's and 1's:
1 means the item in the i list ranks higher, -1 means the item in the j list ranks higher.
This creates a pairwise ranking loss.
"""
def __init__(self, margin=0.5):
super(MarginRankingLoss_learning_lossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Pepijnnn/MasterThesis | MarginRankingLoss_learning_loss | false | 936 | [
"MIT"
] | 0 | 7ec831f5e55f5f181e0196fa78284e2846ce2e26 | https://github.com/Pepijnnn/MasterThesis/tree/7ec831f5e55f5f181e0196fa78284e2846ce2e26 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
Ranking loss as described in the LPM paper.
Targets are randomly permuted, and the final target is a list of -1's and 1's:
1 means the item in the i list ranks higher, -1 means the item in the j list ranks higher.
This creates a pairwise ranking loss.
"""
def __init__(self, margin=0.5):
super().__init__()
self.margin = margin
def forward(self, inputs, targets):
random = torch.randperm(inputs.size(0))
mid = int(inputs.size(0) // 2)
pred_lossi = inputs[:mid]
pred_lossj = inputs[mid:]
target_loss = targets.reshape(inputs.size(0), 1)
target_loss = target_loss[random]
target_lossi = target_loss[:mid]
target_lossj = target_loss[mid:]
final_target = torch.sign(target_lossi - target_lossj)
return F.margin_ranking_loss(pred_lossi, pred_lossj, final_target,
margin=self.margin, reduction='mean')
def get_inputs():
return [torch.rand([4, 1]), torch.rand([4, 1])]
def get_init_inputs():
return []
|
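Here is a minimal usage sketch for the row above (illustration only; it assumes the `MarginRankingLoss_learning_loss` class from the `pytorch_code` cell is in scope, and it seeds the RNG because the loss permutes its targets):

import torch

torch.manual_seed(0)
criterion = MarginRankingLoss_learning_loss(margin=0.5)
pred = torch.rand(4, 1)    # predicted losses
target = torch.rand(4, 1)  # observed losses
loss = criterion(pred, target)
print(loss.item())  # scalar, >= 0 by the clamp in margin_ranking_loss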
LRN | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/gv/cgvon7iygyhjm2kmwtta5t2r3z2byfrr4qwpcmym3h4h6yzxvtvp.py
# Topologically Sorted Source Nodes: [div, div_1, mul, add, div_2, x], Original ATen: [aten.pow, aten.avg_pool2d, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# div => pow_1
# div_1 => avg_pool2d
# div_2 => pow_2
# mul => mul
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_1, [1, 1], [1, 1]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, 0.0001), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 2.0), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.75), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {})
triton_poi_fused_add_avg_pool2d_div_mul_pow_0 = async_compile.triton('triton_poi_fused_add_avg_pool2d_div_mul_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_avg_pool2d_div_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_avg_pool2d_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0001
tmp5 = tmp3 * tmp4
tmp6 = 2.0
tmp7 = tmp5 + tmp6
tmp8 = 0.75
tmp9 = libdevice.pow(tmp7, tmp8)
tmp10 = tmp0 / tmp9
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
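# --- Added commentary (not part of the generated output) ---
# With the default local_size=1, the AvgPool2d is a 1x1/stride-1 identity
# (tmp3 = tmp1 * 1.0 is the degenerate window average), so the whole LRN
# collapses to the pointwise formula
#   out = x / (2.0 + alpha * x^2) ** beta,  alpha = 1e-4, beta = 0.75,
# which is exactly the tmp5..tmp10 sequence above.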
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [div, div_1, mul, add, div_2, x], Original ATen: [aten.pow, aten.avg_pool2d, aten.mul, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_avg_pool2d_div_mul_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LRN(nn.Module):
def __init__(self, local_size=1, alpha=0.0001, beta=0.75,
ACROSS_CHANNELS=False):
super(LRN, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if self.ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, x):
if self.ACROSS_CHANNELS:
div = x.pow(2).unsqueeze(1)
div = self.average(div).squeeze(1)
div = div.mul(self.alpha).add(2.0).pow(self.beta)
else:
div = x.pow(2)
div = self.average(div)
div = div.mul(self.alpha).add(2.0).pow(self.beta)
x = x.div(div)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_avg_pool2d_div_mul_pow_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = 0.0001
tmp5 = tmp3 * tmp4
tmp6 = 2.0
tmp7 = tmp5 + tmp6
tmp8 = 0.75
tmp9 = libdevice.pow(tmp7, tmp8)
tmp10 = tmp0 / tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_avg_pool2d_div_mul_pow_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class LRNNew(nn.Module):
def __init__(self, local_size=1, alpha=0.0001, beta=0.75,
ACROSS_CHANNELS=False):
super(LRNNew, self).__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if self.ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| PengJingchao/DFNet | LRN | false | 937 | [
"MIT"
] | 0 | 49e83501f81515aebca211351e315896da7afc54 | https://github.com/PengJingchao/DFNet/tree/49e83501f81515aebca211351e315896da7afc54 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, local_size=1, alpha=0.0001, beta=0.75,
ACROSS_CHANNELS=False):
super().__init__()
self.ACROSS_CHANNELS = ACROSS_CHANNELS
if self.ACROSS_CHANNELS:
self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1),
stride=1, padding=(int((local_size - 1.0) / 2), 0, 0))
else:
self.average = nn.AvgPool2d(kernel_size=local_size, stride=1,
padding=int((local_size - 1.0) / 2))
self.alpha = alpha
self.beta = beta
def forward(self, x):
if self.ACROSS_CHANNELS:
div = x.pow(2).unsqueeze(1)
div = self.average(div).squeeze(1)
div = div.mul(self.alpha).add(2.0).pow(self.beta)
else:
div = x.pow(2)
div = self.average(div)
div = div.mul(self.alpha).add(2.0).pow(self.beta)
x = x.div(div)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
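For the default configuration the row above reduces to a pointwise formula, which the sketch below verifies against the module (illustration only; assumes the `LRN` class from the `pytorch_code` cell is in scope):

import torch

lrn = LRN()  # defaults: local_size=1, alpha=1e-4, beta=0.75, within-channel
x = torch.rand(4, 4, 4, 4)
y = lrn(x)
# With a 1x1 average pool the normalizer is elementwise in x.
expected = x / x.pow(2).mul(1e-4).add(2.0).pow(0.75)
assert torch.allclose(y, expected)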
ListNetLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [true_smax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# true_smax => amax_1, exp_1, sub_1
# Graph fragment:
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/2k/c2k5uwppf5apzvlxiajoyekazsvq7wr55ts2llwoli6wc4intabk.py
# Topologically Sorted Source Nodes: [true_smax, preds_smax, preds_smax_1, preds_log, mul], Original ATen: [aten._softmax, aten.add, aten.log, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# preds_log => log
# preds_smax => div, sum_1
# preds_smax_1 => add
# true_smax => div_1, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 0.0005), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %log), kwargs = {})
triton_poi_fused__softmax_add_log_mul_1 = async_compile.triton('triton_poi_fused__softmax_add_log_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_log_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_log_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x3), xmask)
tmp10 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tmp9 / tmp16
tmp18 = 0.0005
tmp19 = tmp17 + tmp18
tmp20 = tl_math.log(tmp19)
tmp21 = tmp8 * tmp20
tl.store(out_ptr0 + (x3), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/vo/cvojznxu67wmmp7b5valrucu3v3bhdekrp6efpbb7nhgo2ivqa5y.py
# Topologically Sorted Source Nodes: [sum_1, neg, mean], Original ATen: [aten.sum, aten.neg, aten.mean]
# Source node to ATen node mapping:
# mean => mean
# neg => neg
# sum_1 => sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_3,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%neg,), kwargs = {})
triton_per_fused_mean_neg_sum_2 = async_compile.triton('triton_per_fused_mean_neg_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_neg_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_neg_sum_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp3 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = -tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.sum(tmp8, 1)[:, None]
tmp11 = 64.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [true_smax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [preds_smax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [true_smax, preds_smax, preds_smax_1, preds_log, mul], Original ATen: [aten._softmax, aten.add, aten.log, aten.mul]
triton_poi_fused__softmax_add_log_mul_1.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [sum_1, neg, mean], Original ATen: [aten.sum, aten.neg, aten.mean]
triton_per_fused_mean_neg_sum_2.run(buf4, buf2, 1, 64, grid=grid(1), stream=stream0)
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ListNetLoss(nn.Module):
def __init__(self):
super(ListNetLoss, self).__init__()
return
def forward(self, y_pred, y_true, eps=0.0005, padded_value_indicator=-1):
"""
ListNet loss introduced in "Learning to Rank: From Pairwise Approach to Listwise Approach".
:param y_pred: predictions from the model, shape [batch_size, slate_length]
:param y_true: ground truth labels, shape [batch_size, slate_length]
:param eps: epsilon value, used for numerical stability
:param padded_value_indicator: an indicator of the y_true index containing a padded item, e.g. -1
:return: loss value, a torch.Tensor
"""
y_pred = y_pred.clone()
y_true = y_true.clone()
preds_smax = F.softmax(y_pred, dim=1)
true_smax = F.softmax(y_true, dim=1)
preds_smax = preds_smax + eps
preds_log = torch.log(preds_smax)
return torch.mean(-torch.sum(true_smax * preds_log, dim=1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
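# The loss above is the cross-entropy between softmax(y_true, dim=1) and the
# eps-shifted log-softmax of y_pred. A minimal eager-mode sketch of the same
# computation, useful for comparing against the compiled `call` above; the
# helper name `_listnet_eager_check` is illustrative and not part of the
# original module:
def _listnet_eager_check():
    torch.manual_seed(0)
    y_pred, y_true = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    true_smax = F.softmax(y_true, dim=1)
    preds_log = torch.log(F.softmax(y_pred, dim=1) + 0.0005)
    # scalar loss: negative per-slot cross-entropy, summed over dim 1, meaned
    return torch.mean(-torch.sum(true_smax * preds_log, dim=1))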
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
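# Indexing note for the kernel above: with contiguous (4, 4, 4, 4) inputs of
# strides (64, 16, 4, 1), x0 = xindex % 16 picks a position inside one 4x4
# plane and x2 = xindex // 64 picks the batch, so the four strided loads at
# offsets 0/16/32/48 walk dim 1. The kernel therefore emits the numerically
# stable exp(x - max(x, dim=1)); the normalizing sum is deferred to the next
# kernel.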
@triton.jit
def triton_poi_fused__softmax_add_log_mul_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + x3, xmask)
tmp10 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp12 = tmp10 + tmp11
tmp14 = tmp12 + tmp13
tmp16 = tmp14 + tmp15
tmp17 = tmp9 / tmp16
tmp18 = 0.0005
tmp19 = tmp17 + tmp18
tmp20 = tl_math.log(tmp19)
tmp21 = tmp8 * tmp20
tl.store(out_ptr0 + x3, tmp21, xmask)
@triton.jit
def triton_per_fused_mean_neg_sum_2(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
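    # The bare expressions below are leftover index/mask computations whose
    # results became unused after dead-assignment stripping; they have no
    # effect on the kernel's output.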
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp3 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = -tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.sum(tmp8, 1)[:, None]
tmp11 = 64.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp12, None)
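# Reduction note: each r-element first sums its four dim-1 entries of the
# elementwise products from the previous kernel (offsets 0/16/32/48), negates
# the result, then tl.sum reduces the remaining 4 * 16 = 64 values; dividing
# by 64.0 realizes torch.mean(-torch.sum(..., dim=1)) in one persistent pass.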
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf1, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_add_log_mul_1[grid(256)](buf0, buf1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del buf1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_mean_neg_sum_2[grid(1)](buf4, buf2, 1, 64, XBLOCK=
1, num_warps=2, num_stages=1)
del buf2
return buf4,
class ListNetLossNew(nn.Module):
def __init__(self):
super(ListNetLossNew, self).__init__()
return
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Pepijnnn/MasterThesis | ListNetLoss | false | 938 | ["MIT"] | 0 | 7ec831f5e55f5f181e0196fa78284e2846ce2e26 | https://github.com/Pepijnnn/MasterThesis/tree/7ec831f5e55f5f181e0196fa78284e2846ce2e26 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
return
def forward(self, y_pred, y_true, eps=0.0005, padded_value_indicator=-1):
"""
ListNet loss introduced in "Learning to Rank: From Pairwise Approach to Listwise Approach".
:param y_pred: predictions from the model, shape [batch_size, slate_length]
:param y_true: ground truth labels, shape [batch_size, slate_length]
:param eps: epsilon value, used for numerical stability
:param padded_value_indicator: an indicator of the y_true index containing a padded item, e.g. -1
:return: loss value, a torch.Tensor
"""
y_pred = y_pred.clone()
y_true = y_true.clone()
preds_smax = F.softmax(y_pred, dim=1)
true_smax = F.softmax(y_true, dim=1)
preds_smax = preds_smax + eps
preds_log = torch.log(preds_smax)
return torch.mean(-torch.sum(true_smax * preds_log, dim=1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
AsymmetricLossOptimized | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ry/cry5kqibd2j5v7zh6b5qbaeadqz6hqmokj5ujzqm6gt55y7roh7a.py
# Topologically Sorted Source Nodes: [sigmoid, clamp, log, mul, sub, sub_1, add_, clamp_, clamp_1, log_1, mul_1, add__1, mul_2, sub_2, mul_3, sub_3, mul_4, mul_5, add, pow_1, imul, sum_1, neg], Original ATen: [aten.sigmoid, aten.clamp, aten.log, aten.mul, aten.rsub, aten.add, aten.sub, aten.pow, aten.sum, aten.neg]
# Source node to ATen node mapping:
# add => add_2
# add_ => add
# add__1 => add_1
# clamp => clamp_min
# clamp_ => clamp_max
# clamp_1 => clamp_min_1
# imul => mul_6
# log => log
# log_1 => log_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# neg => neg
# pow_1 => pow_1
# sigmoid => sigmoid
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# sum_1 => sum_1
# Graph fragment:
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg1_1,), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sigmoid, 1e-08), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %log), kwargs = {})
# %sub : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 0.05), kwargs = {})
# %clamp_max : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add, 1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%clamp_max, 1e-08), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %log_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %arg0_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_2), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, %sub), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 4), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {})
# %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%sub_3, %add_2), kwargs = {})
# %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %pow_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_6,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {})
triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {8: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 9), equal_to_1=(8,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp4 * tmp0
tmp6 = tmp1 - tmp4
tmp7 = 0.05
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.minimum(tmp8, tmp1)
tmp10 = tmp9 * tmp2
tmp11 = tmp1 - tmp5
tmp12 = tmp11 - tmp10
tmp13 = tmp0 * tmp1
tmp14 = 4.0
tmp15 = tmp2 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.pow(tmp12, tmp16)
tmp18 = 1e-08
tmp19 = triton_helpers.maximum(tmp4, tmp18)
tmp20 = tl_math.log(tmp19)
tmp21 = tmp0 * tmp20
tmp22 = triton_helpers.maximum(tmp9, tmp18)
tmp23 = tl_math.log(tmp22)
tmp24 = tmp2 * tmp23
tmp25 = tmp21 + tmp24
tmp26 = tmp25 * tmp17
tmp27 = tl.broadcast_to(tmp26, [RBLOCK])
tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0))
tmp30 = -tmp29
tl.store(out_ptr0 + (tl.broadcast_to(r0, [RBLOCK])), tmp2, None)
tl.store(out_ptr1 + (tl.broadcast_to(r0, [RBLOCK])), tmp5, None)
tl.store(out_ptr2 + (tl.broadcast_to(r0, [RBLOCK])), tmp10, None)
tl.store(out_ptr3 + (tl.broadcast_to(r0, [RBLOCK])), tmp17, None)
tl.store(out_ptr4 + (tl.broadcast_to(r0, [RBLOCK])), tmp26, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp30, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [sigmoid, clamp, log, mul, sub, sub_1, add_, clamp_, clamp_1, log_1, mul_1, add__1, mul_2, sub_2, mul_3, sub_3, mul_4, mul_5, add, pow_1, imul, sum_1, neg], Original ATen: [aten.sigmoid, aten.clamp, aten.log, aten.mul, aten.rsub, aten.add, aten.sub, aten.pow, aten.sum, aten.neg]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0.run(buf6, arg0_1, arg1_1, buf0, buf1, buf2, buf3, buf4, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf6, buf3, buf4, buf2, buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AsymmetricLossOptimized(nn.Module):
""" Notice - optimized version, minimizes memory allocation and gpu uploading,
favors inplace operations"""
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-08,
disable_torch_grad_focal_loss=False):
super(AsymmetricLossOptimized, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
        self.targets = self.anti_targets = self.xs_pos = self.xs_neg = \
            self.asymmetric_w = self.loss = None
def forward(self, x, y):
""""
Parameters
----------
x: input logits
y: targets (multi-label binarized vector)
"""
self.targets = y
self.anti_targets = 1 - y
self.xs_pos = torch.sigmoid(x)
self.xs_neg = 1.0 - self.xs_pos
if self.clip is not None and self.clip > 0:
self.xs_neg.add_(self.clip).clamp_(max=1)
self.loss = self.targets * torch.log(self.xs_pos.clamp(min=self.eps))
self.loss.add_(self.anti_targets * torch.log(self.xs_neg.clamp(min=
self.eps)))
if self.gamma_neg > 0 or self.gamma_pos > 0:
self.xs_pos = self.xs_pos * self.targets
self.xs_neg = self.xs_neg * self.anti_targets
self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
self.gamma_pos * self.targets + self.gamma_neg * self.
anti_targets)
self.loss *= self.asymmetric_w
return -self.loss.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
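# A minimal eager sketch of the asymmetric focusing weight alone, under the
# default hyperparameters (gamma_neg=4, gamma_pos=1, clip=0.05); the helper
# name `_asl_weight_sketch` is illustrative and not part of the original
# module:
def _asl_weight_sketch(x, y):
    xs_pos = torch.sigmoid(x)
    xs_neg = (1.0 - xs_pos + 0.05).clamp(max=1)
    # (1 - p_t)^gamma, with gamma selected per element by the binary target
    return torch.pow(1 - xs_pos * y - xs_neg * (1 - y), 1 * y + 4 * (1 - y))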
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
out_ptr4, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp4 * tmp0
tmp6 = tmp1 - tmp4
tmp7 = 0.05
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.minimum(tmp8, tmp1)
tmp10 = tmp9 * tmp2
tmp11 = tmp1 - tmp5
tmp12 = tmp11 - tmp10
tmp13 = tmp0 * tmp1
tmp14 = 4.0
tmp15 = tmp2 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = libdevice.pow(tmp12, tmp16)
tmp18 = 1e-08
tmp19 = triton_helpers.maximum(tmp4, tmp18)
tmp20 = tl_math.log(tmp19)
tmp21 = tmp0 * tmp20
tmp22 = triton_helpers.maximum(tmp9, tmp18)
tmp23 = tl_math.log(tmp22)
tmp24 = tmp2 * tmp23
tmp25 = tmp21 + tmp24
tmp26 = tmp25 * tmp17
tmp27 = tl.broadcast_to(tmp26, [RBLOCK])
tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0))
tmp30 = -tmp29
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp2, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp5, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp10, None)
tl.store(out_ptr3 + tl.broadcast_to(r0, [RBLOCK]), tmp17, None)
tl.store(out_ptr4 + tl.broadcast_to(r0, [RBLOCK]), tmp26, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp30, None)
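# Temporary-to-math mapping for the fused kernel above (in_ptr0 carries the
# targets y and in_ptr1 the logits x, per the traced graph fragments):
# tmp2 = 1 - y, tmp4 = sigmoid(x), tmp5 = xs_pos * y,
# tmp9 = clamp(1 - sigmoid(x) + 0.05, max=1) (xs_neg),
# tmp17 = (1 - tmp5 - tmp9 * tmp2) ** (1*y + 4*(1 - y)) (the asymmetric
# focusing weight), tmp25 = the eps-clamped log terms, and tmp30 negates the
# full sum -- matching AsymmetricLossOptimized.forward.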
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5
del buf5
get_raw_stream(0)
triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0[grid
(1)](buf6, arg0_1, arg1_1, buf0, buf1, buf2, buf3, buf4, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf6, buf3, buf4, buf2, buf1, buf0
class AsymmetricLossOptimizedNew(nn.Module):
""" Notice - optimized version, minimizes memory allocation and gpu uploading,
favors inplace operations"""
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-08,
disable_torch_grad_focal_loss=False):
super(AsymmetricLossOptimizedNew, self).__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
        self.targets = self.anti_targets = self.xs_pos = self.xs_neg = \
            self.asymmetric_w = self.loss = None
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Pepijnnn/MasterThesis | AsymmetricLossOptimized | false | 939 | ["MIT"] | 0 | 7ec831f5e55f5f181e0196fa78284e2846ce2e26 | https://github.com/Pepijnnn/MasterThesis/tree/7ec831f5e55f5f181e0196fa78284e2846ce2e26 | import torch
import torch.nn as nn
class Model(nn.Module):
""" Notice - optimized version, minimizes memory allocation and gpu uploading,
favors inplace operations"""
def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-08,
disable_torch_grad_focal_loss=False):
super().__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.clip = clip
self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
self.eps = eps
        self.targets = self.anti_targets = self.xs_pos = self.xs_neg = \
            self.asymmetric_w = self.loss = None
def forward(self, x, y):
""""
Parameters
----------
x: input logits
y: targets (multi-label binarized vector)
"""
self.targets = y
self.anti_targets = 1 - y
self.xs_pos = torch.sigmoid(x)
self.xs_neg = 1.0 - self.xs_pos
if self.clip is not None and self.clip > 0:
self.xs_neg.add_(self.clip).clamp_(max=1)
self.loss = self.targets * torch.log(self.xs_pos.clamp(min=self.eps))
self.loss.add_(self.anti_targets * torch.log(self.xs_neg.clamp(min=
self.eps)))
if self.gamma_neg > 0 or self.gamma_pos > 0:
self.xs_pos = self.xs_pos * self.targets
self.xs_neg = self.xs_neg * self.anti_targets
self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
self.gamma_pos * self.targets + self.gamma_neg * self.
anti_targets)
self.loss *= self.asymmetric_w
return -self.loss.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
HorizontalMaxPool2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ma/cmamwx5pzfl7ui4xihdrse7u4abzdvpalywvuwdsmh362iq42kjh.py
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class HorizontalMaxPool2d(nn.Module):
def __init__(self):
super(HorizontalMaxPool2d, self).__init__()
def forward(self, x):
inp_size = x.size()
return nn.functional.max_pool2d(input=x, kernel_size=(1, inp_size[3]))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
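# Shape sketch: a kernel of (1, W) collapses only the width axis, so the
# (4, 4, 4, 4) inputs above come out as (4, 4, 4, 1) -- the exact buffer
# shape allocated in the compiled `call`. A minimal illustrative check
# (helper name not part of the original module):
def _hpool_shape_check():
    out = HorizontalMaxPool2d()(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 4, 1)
    return out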
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x0, tmp6, xmask)
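# Pooling note: the input is contiguous with innermost stride 1, so the four
# loads at offsets 0..3 of 4 * x0 cover one full width-4 row; the chained
# maximums reduce that row to a single value, i.e. max_pool2d with
# kernel_size=(1, 4) over the xnumel = 4 * 4 * 4 = 64 output positions.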
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class HorizontalMaxPool2dNew(nn.Module):
def __init__(self):
super(HorizontalMaxPool2dNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Qidian213/NAIC2019 | HorizontalMaxPool2d | false | 940 | ["MIT"] | 0 | 23e05a8a096168ccfa4d1743467fdf78ffcaabba | https://github.com/Qidian213/NAIC2019/tree/23e05a8a096168ccfa4d1743467fdf78ffcaabba | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
inp_size = x.size()
return nn.functional.max_pool2d(input=x, kernel_size=(1, inp_size[3]))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BinaryLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ha/chaw4j4cijilv4rurwmzvs3nnol6fmwmio6yneprgmynztfqb3kg.py
# Topologically Sorted Source Nodes: [pos_loss, sum_1, neg_loss, sum_2, add, loss], Original ATen: [aten.neg, aten.sum, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# loss => div
# neg_loss => neg_1
# pos_loss => neg
# sum_1 => sum_3
# sum_2 => sum_4
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%select,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%neg,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%select_1,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%neg_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_3, %sum_4), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 8), kwargs = {})
triton_per_fused_add_div_neg_sum_1 = async_compile.triton('triton_per_fused_add_div_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp17 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp19 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp22 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp25 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp2 = tl_math.exp(tmp1)
tmp3 = tl_math.exp(tmp0)
tmp4 = tmp2 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp13 = -tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp18 = tl_math.exp(tmp17)
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tl_math.log(tmp27)
tmp29 = tmp17 - tmp28
tmp30 = -tmp29
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.sum(tmp31, 1)[:, None]
tmp34 = tmp16 + tmp33
tmp35 = 0.125
tmp36 = tmp34 * tmp35
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp36, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax_1], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_0.run(arg1_1, buf2, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf4 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [pos_loss, sum_1, neg_loss, sum_2, add, loss], Original ATen: [aten.neg, aten.sum, aten.add, aten.div]
triton_per_fused_add_div_neg_sum_1.run(buf4, buf0, buf2, 1, 64, grid=grid(1), stream=stream0)
del buf0
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class BinaryLoss(nn.Module):
def __init__(self):
super(BinaryLoss, self).__init__()
def forward(self, pos_score, neg_score):
pos_loss = -F.log_softmax(pos_score, dim=1)[:, 1]
neg_loss = -F.log_softmax(neg_score, dim=1)[:, 0]
loss = (pos_loss.sum() + neg_loss.sum()) / (pos_loss.size(0) +
neg_loss.size(0))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
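# Numeric note: with the (4, 4, 4, 4) inputs above, pos_loss.size(0) and
# neg_loss.size(0) are both 4, so the denominator is 8; this is where the
# literal 0.125 multiplier inside the fused reduction kernel comes from. A
# minimal illustrative check (helper name not part of the original module):
def _binary_loss_denominator_check():
    pos, neg = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    pos_l = -F.log_softmax(pos, dim=1)[:, 1]
    neg_l = -F.log_softmax(neg, dim=1)[:, 0]
    assert torch.allclose(BinaryLoss()(pos, neg),
        (pos_l.sum() + neg_l.sum()) / 8)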
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
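# First log-softmax stage: subtract the per-dim-1 maximum (the four loads at
# offsets 0/16/32/48) from every element for numerical stability; the
# log-sum-exp normalizer is folded into the reduction kernel below.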
@triton.jit
def triton_per_fused_add_div_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp17 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp22 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp25 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = tl_math.exp(tmp1)
tmp3 = tl_math.exp(tmp0)
tmp4 = tmp2 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp13 = -tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.sum(tmp14, 1)[:, None]
tmp18 = tl_math.exp(tmp17)
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tl_math.log(tmp27)
tmp29 = tmp17 - tmp28
tmp30 = -tmp29
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.sum(tmp31, 1)[:, None]
tmp34 = tmp16 + tmp33
tmp35 = 0.125
tmp36 = tmp34 * tmp35
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp36, None)
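# Second stage: tmp11 and tmp28 are log(sum(exp(.))) over dim 1, so tmp12 and
# tmp29 complete the two log_softmax results; the 16 + r0 load selects
# channel 1 of pos_score and the plain r0 load channel 0 of neg_score,
# matching the [:, 1] and [:, 0] indexing. The final 0.125 factor is
# 1 / (pos_loss.size(0) + neg_loss.size(0)) = 1 / 8 for these inputs.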
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf4 = buf1
del buf1
triton_per_fused_add_div_neg_sum_1[grid(1)](buf4, buf0, buf2, 1, 64,
XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf2
return buf4,
class BinaryLossNew(nn.Module):
def __init__(self):
super(BinaryLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| PengJingchao/DFNet | BinaryLoss | false | 941 | ["MIT"] | 0 | 49e83501f81515aebca211351e315896da7afc54 | https://github.com/PengJingchao/DFNet/tree/49e83501f81515aebca211351e315896da7afc54 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, pos_score, neg_score):
pos_loss = -F.log_softmax(pos_score, dim=1)[:, 1]
neg_loss = -F.log_softmax(neg_score, dim=1)[:, 0]
loss = (pos_loss.sum() + neg_loss.sum()) / (pos_loss.size(0) +
neg_loss.size(0))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
HSigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/gl/cgljna3wfarubemgd6d2p3bgazvfhdxtrcu7luu5yza3rrfkty2s.py
# Topologically Sorted Source Nodes: [add, relu6, truediv], Original ATen: [aten.add, aten.hardtanh, aten.div]
# Source node to ATen node mapping:
# add => add
# relu6 => clamp_max, clamp_min
# truediv => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {})
triton_poi_fused_add_div_hardtanh_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, relu6, truediv], Original ATen: [aten.add, aten.hardtanh, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional
import torch.optim
import torch.nn.parallel
import torch.utils.data.distributed
class HSigmoid(nn.Module):
""" Applies the Hard-Sigmoid function element-wise.
`"Searching for MobileNetV3" <https://arxiv.org/pdf/1905.02244.pdf>`_
Examples:
        >>> m = HSigmoid()
>>> x = torch.randn(2)
>>> output = m(x)
"""
@staticmethod
def forward(x: 'torch.Tensor') ->torch.Tensor:
return F.relu6(x + 3, inplace=True) / 6.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
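# Worked values: relu6(x + 3) / 6 maps -3 -> 0, 0 -> 0.5, and +3 -> 1,
# saturating outside [-3, 3]. A minimal illustrative check (helper name not
# part of the original module):
def _hsigmoid_values_check():
    x = torch.tensor([-4.0, -3.0, 0.0, 3.0, 4.0])
    expected = torch.tensor([0.0, 0.0, 0.5, 1.0, 1.0])
    assert torch.allclose(HSigmoid()(x), expected)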
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.functional
import torch.optim
import torch.nn.parallel
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HSigmoidNew(nn.Module):
""" Applies the Hard-Sigmoid function element-wise.
`"Searching for MobileNetV3" <https://arxiv.org/pdf/1905.02244.pdf>`_
Examples:
    >>> m = HSigmoidNew()
>>> x = torch.randn(2)
>>> output = m(x)
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
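def _hsigmoid_smoke_test():
    # Hedged usage sketch (helper name is hypothetical, requires CUDA):
    # call() guards for a contiguous (4, 4, 4, 4) float32 input, and the
    # result should match the eager formula relu6(x + 3) / 6.
    m = HSigmoidNew()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = torch.nn.functional.relu6(x + 3) / 6.0
    assert torch.allclose(m(x), ref, atol=1e-6)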
| PhelaPoscam/SRGAN-PyTorch | HSigmoid | false | 942 | [
"Apache-2.0"
] | 0 | c1c68707dbddd1130b2ea71023df748080bcbd52 | https://github.com/PhelaPoscam/SRGAN-PyTorch/tree/c1c68707dbddd1130b2ea71023df748080bcbd52 | import torch
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional
import torch.optim
import torch.nn.parallel
import torch.utils.data.distributed
class Model(nn.Module):
""" Applies the Hard-Sigmoid function element-wise.
`"Searching for MobileNetV3" <https://arxiv.org/pdf/1905.02244.pdf>`_
Examples:
>>> m = Mish()
>>> x = torch.randn(2)
>>> output = m(x)
"""
@staticmethod
    def forward(x: 'torch.Tensor') -> torch.Tensor:
return F.relu6(x + 3, inplace=True) / 6.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Module_CharbonnierLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ar/car7cj6c4qyubctqoezgxg4se3a2b7mtff6zfvpuv3hqcitn26ue.py
# Topologically Sorted Source Nodes: [sub, pow_1, add, sqrt, mean], Original ATen: [aten.sub, aten.pow, aten.add, aten.sqrt, aten.mean]
# Source node to ATen node mapping:
# add => add
# mean => mean
# pow_1 => pow_1
# sqrt => sqrt
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, 1e-06), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {})
triton_per_fused_add_mean_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_pow_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 1e-06
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, add, sqrt, mean], Original ATen: [aten.sub, aten.pow, aten.add, aten.sqrt, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_pow_sqrt_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Module_CharbonnierLoss(nn.Module):
def __init__(self, epsilon=0.001):
super(Module_CharbonnierLoss, self).__init__()
self.epsilon = epsilon
def forward(self, output, gt):
return torch.mean(torch.sqrt((output - gt) ** 2 + self.epsilon ** 2))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
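def _charbonnier_reference(output, gt, epsilon=0.001):
    # Hedged reference sketch (helper name is hypothetical): with the
    # default epsilon, epsilon ** 2 == 1e-06, which is exactly the scalar
    # (tmp4) folded into the fused reduction kernel above.
    return torch.mean(torch.sqrt((output - gt) ** 2 + epsilon ** 2))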
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 1e-06
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_pow_sqrt_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class Module_CharbonnierLossNew(nn.Module):
def __init__(self, epsilon=0.001):
super(Module_CharbonnierLossNew, self).__init__()
self.epsilon = epsilon
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Pumpkin123709/LBEC | Module_CharbonnierLoss | false | 944 | [
"MIT"
] | 0 | 18661faa35769f731847e0226ff601754e134668 | https://github.com/Pumpkin123709/LBEC/tree/18661faa35769f731847e0226ff601754e134668 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, epsilon=0.001):
super().__init__()
self.epsilon = epsilon
def forward(self, output, gt):
return torch.mean(torch.sqrt((output - gt) ** 2 + self.epsilon ** 2))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/in/cin6bebkpfuweyzzgtljy26zh2yhrs7rpusw2jnlmszgn4jg27lx.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone, aten.transpose]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
# %permute_8 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%clone, [0, 2, 1]), kwargs = {})
triton_poi_fused_clone_transpose_0 = async_compile.triton('triton_poi_fused_clone_transpose_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_transpose_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4
y3 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x1 + (4*y0)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
tl.store(out_ptr1 + (y2 + (4*x1) + (16*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_weights => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/wd/cwdechbtujfh3khensgj7m65ycmclcmrggkwsxpoa3is2n47bah4.py
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# combined => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %view_1], 2), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/oj/coje6ro7aly3k4hwvxmkcoxi6nwxzpg23gh2inoddo4imx7svkus.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# output_1 => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_4 = async_compile.triton('triton_poi_fused_tanh_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_4(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [query_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone, aten.transpose]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_transpose_0.run(primals_2, buf1, buf9, 16, 4, grid=grid(16, 4), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous, attention_scores], Original ATen: [aten.clone, aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), buf1, out=buf2)
buf3 = reinterpret_tensor(buf1, (16, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = reinterpret_tensor(buf2, (16, 4), (4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [attention_weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [mix], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf5)
buf6 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf5, buf0, buf6, 128, grid=grid(128), stream=stream0)
del buf0
buf7 = reinterpret_tensor(buf5, (16, 4), (4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf6, (16, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.tanh]
triton_poi_fused_tanh_4.run(buf8, 64, grid=grid(64), stream=stream0)
return (buf8, reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), buf4, reinterpret_tensor(buf6, (16, 8), (8, 1), 0), buf8, primals_4, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Attention(nn.Module):
""" Applies attention mechanism on the `context` using the `query`.
**Thank you** to IBM for their initial implementation of :class:`Attention`. Here is
their `License
<https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__.
Args:
dimensions (int): Dimensionality of the query and context.
attention_type (str, optional): How to compute the attention score:
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
Example:
>>> attention = Attention(256)
>>> query = torch.randn(5, 1, 256)
>>> context = torch.randn(5, 5, 256)
>>> output, weights = attention(query, context)
>>> output.size()
torch.Size([5, 1, 256])
>>> weights.size()
torch.Size([5, 1, 5])
"""
def __init__(self, dimensions, attention_type='general'):
super(Attention, self).__init__()
if attention_type not in ['dot', 'general']:
raise ValueError('Invalid attention type selected.')
self.attention_type = attention_type
if self.attention_type == 'general':
self.linear_in = nn.Linear(dimensions, dimensions, bias=False)
self.linear_out = nn.Linear(dimensions * 2, dimensions, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.tanh = nn.Tanh()
def forward(self, query, context):
"""
Args:
query (:class:`torch.FloatTensor` [batch size, output length, dimensions]): Sequence of
queries to query the context.
context (:class:`torch.FloatTensor` [batch size, query length, dimensions]): Data
                over which to apply the attention mechanism.
Returns:
:class:`tuple` with `output` and `weights`:
            * **output** (:class:`torch.FloatTensor` [batch size, output length, dimensions]):
Tensor containing the attended features.
* **weights** (:class:`torch.FloatTensor` [batch size, output length, query length]):
Tensor containing attention weights.
"""
batch_size, output_len, dimensions = query.size()
query_len = context.size(1)
if self.attention_type == 'general':
query = query.view(batch_size * output_len, dimensions)
query = self.linear_in(query)
query = query.view(batch_size, output_len, dimensions)
attention_scores = torch.bmm(query, context.transpose(1, 2).
contiguous())
attention_scores = attention_scores.view(batch_size * output_len,
query_len)
attention_weights = self.softmax(attention_scores)
attention_weights = attention_weights.view(batch_size, output_len,
query_len)
mix = torch.bmm(attention_weights, context)
combined = torch.cat((mix, query), dim=2)
combined = combined.view(batch_size * output_len, 2 * dimensions)
output = self.linear_out(combined).view(batch_size, output_len,
dimensions)
output = self.tanh(output)
return output, attention_weights
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dimensions': 4}]
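def _softmax_two_pass(scores):
    # Hedged sketch (helper name is hypothetical): the numerically stable
    # softmax that the two fused kernels above split into passes -- the
    # first subtracts the per-row max and exponentiates, the second
    # normalizes by the per-row sum.
    shifted = scores - scores.max(dim=-1, keepdim=True).values
    exp = shifted.exp()
    return exp / exp.sum(dim=-1, keepdim=True)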
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_transpose_0(in_ptr0, out_ptr0, out_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4
y3 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
tl.store(out_ptr1 + (y2 + 4 * x1 + 16 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
buf9 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_transpose_0[grid(16, 4)](primals_2, buf1,
buf9, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), buf1, out=buf2)
buf3 = reinterpret_tensor(buf1, (16, 4), (4, 1), 0)
del buf1
triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf2, (16, 4), (4, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1),
0), primals_2, out=buf5)
buf6 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_3[grid(128)](buf5, buf0, buf6, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
buf7 = reinterpret_tensor(buf5, (16, 4), (4, 1), 0)
del buf5
extern_kernels.mm(reinterpret_tensor(buf6, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0)
del buf7
triton_poi_fused_tanh_4[grid(64)](buf8, 64, XBLOCK=64, num_warps=1,
num_stages=1)
return buf8, reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0
), buf4, reinterpret_tensor(buf6, (16, 8), (8, 1), 0
), buf8, primals_4, buf9
class AttentionNew(nn.Module):
""" Applies attention mechanism on the `context` using the `query`.
**Thank you** to IBM for their initial implementation of :class:`Attention`. Here is
their `License
<https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__.
Args:
dimensions (int): Dimensionality of the query and context.
attention_type (str, optional): How to compute the attention score:
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
Example:
>>> attention = Attention(256)
>>> query = torch.randn(5, 1, 256)
>>> context = torch.randn(5, 5, 256)
>>> output, weights = attention(query, context)
>>> output.size()
torch.Size([5, 1, 256])
>>> weights.size()
torch.Size([5, 1, 5])
"""
def __init__(self, dimensions, attention_type='general'):
super(AttentionNew, self).__init__()
if attention_type not in ['dot', 'general']:
raise ValueError('Invalid attention type selected.')
self.attention_type = attention_type
if self.attention_type == 'general':
self.linear_in = nn.Linear(dimensions, dimensions, bias=False)
self.linear_out = nn.Linear(dimensions * 2, dimensions, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.tanh = nn.Tanh()
def forward(self, input_0, input_1):
primals_3 = self.linear_in.weight
primals_4 = self.linear_out.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0], output[1]
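def _attention_smoke_test():
    # Hedged usage sketch (helper name is hypothetical, requires CUDA):
    # the call() guards pin query and context to contiguous (4, 4, 4)
    # tensors, i.e. dimensions == 4.
    attn = AttentionNew(4).cuda()
    query = torch.rand(4, 4, 4, device='cuda')
    context = torch.rand(4, 4, 4, device='cuda')
    output, weights = attn(query, context)
    assert output.shape == (4, 4, 4) and weights.shape == (4, 4, 4)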
| PattynR/PyTorch-NLP | Attention | false | 945 | [
"BSD-3-Clause"
] | 0 | 8995774abf3734db6da174425843d883face5218 | https://github.com/PattynR/PyTorch-NLP/tree/8995774abf3734db6da174425843d883face5218 | import torch
import torch.nn as nn
class Model(nn.Module):
""" Applies attention mechanism on the `context` using the `query`.
**Thank you** to IBM for their initial implementation of :class:`Attention`. Here is
their `License
<https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__.
Args:
dimensions (int): Dimensionality of the query and context.
attention_type (str, optional): How to compute the attention score:
* dot: :math:`score(H_j,q) = H_j^T q`
* general: :math:`score(H_j, q) = H_j^T W_a q`
Example:
>>> attention = Attention(256)
>>> query = torch.randn(5, 1, 256)
>>> context = torch.randn(5, 5, 256)
>>> output, weights = attention(query, context)
>>> output.size()
torch.Size([5, 1, 256])
>>> weights.size()
torch.Size([5, 1, 5])
"""
def __init__(self, dimensions, attention_type='general'):
super().__init__()
if attention_type not in ['dot', 'general']:
raise ValueError('Invalid attention type selected.')
self.attention_type = attention_type
if self.attention_type == 'general':
self.linear_in = nn.Linear(dimensions, dimensions, bias=False)
self.linear_out = nn.Linear(dimensions * 2, dimensions, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.tanh = nn.Tanh()
def forward(self, query, context):
"""
Args:
query (:class:`torch.FloatTensor` [batch size, output length, dimensions]): Sequence of
queries to query the context.
context (:class:`torch.FloatTensor` [batch size, query length, dimensions]): Data
                over which to apply the attention mechanism.
Returns:
:class:`tuple` with `output` and `weights`:
            * **output** (:class:`torch.FloatTensor` [batch size, output length, dimensions]):
Tensor containing the attended features.
* **weights** (:class:`torch.FloatTensor` [batch size, output length, query length]):
Tensor containing attention weights.
"""
batch_size, output_len, dimensions = query.size()
query_len = context.size(1)
if self.attention_type == 'general':
query = query.view(batch_size * output_len, dimensions)
query = self.linear_in(query)
query = query.view(batch_size, output_len, dimensions)
attention_scores = torch.bmm(query, context.transpose(1, 2).
contiguous())
attention_scores = attention_scores.view(batch_size * output_len,
query_len)
attention_weights = self.softmax(attention_scores)
attention_weights = attention_weights.view(batch_size, output_len,
query_len)
mix = torch.bmm(attention_weights, context)
combined = torch.cat((mix, query), dim=2)
combined = combined.view(batch_size * output_len, 2 * dimensions)
output = self.linear_out(combined).view(batch_size, output_len,
dimensions)
output = self.tanh(output)
return output, attention_weights
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
Reg_layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_2 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Reg_layer(nn.Module):
"""
modified by Zylo117
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3,
padding=1, bias=True)
self.header = nn.Conv2d(in_channels, out_channels, kernel_size=1,
stride=1, padding=0, bias=True)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
x = self.header(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
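def _reg_layer_reference(x, conv, header):
    # Hedged reference sketch (helper name is hypothetical): the compiled
    # graph runs both convolutions via extern_kernels.convolution and only
    # fuses the pointwise tails -- bias add + ReLU after `conv`, bias add
    # after `header`.
    return header(torch.relu(conv(x)))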
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1
class Reg_layerNew(nn.Module):
"""
modified by Zylo117
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3,
padding=1, bias=True)
self.header = nn.Conv2d(in_channels, out_channels, kernel_size=1,
stride=1, padding=0, bias=True)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.header.weight
primals_5 = self.header.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
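def _reg_layer_smoke_test():
    # Hedged usage sketch (helper name is hypothetical, requires CUDA):
    # call() guards for a contiguous (4, 4, 4, 4) input with
    # in_channels == out_channels == 4.
    m = Reg_layerNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert m(x).shape == (4, 4, 4, 4)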
| Peiiii/detro | Reg_layer | false | 946 | [
"MIT"
] | 0 | 26d74468d7554dc20b2a2daf7ec5009302c820f2 | https://github.com/Peiiii/detro/tree/26d74468d7554dc20b2a2daf7ec5009302c820f2 | import torch
from torch import nn
class Model(nn.Module):
"""
modified by Zylo117
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3,
padding=1, bias=True)
self.header = nn.Conv2d(in_channels, out_channels, kernel_size=1,
stride=1, padding=0, bias=True)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.relu(x)
x = self.header(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Classifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/7k/c7kjkeggii63yvgjzjnf3liqwpad37zmjmymxfbjh5lqrygqgzxj.py
# Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten.add]
# Source node to ATen node mapping:
# bilinear => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %primals_2), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear]
buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_1
buf1 = buf0
del buf0
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf2, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
return (buf2, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Classifier(nn.Module):
def __init__(self, num_inputs1, num_inputs2):
super().__init__()
self.network = nn.Bilinear(num_inputs1, num_inputs2, 1)
def forward(self, x1, x2):
return self.network(x1, x2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs1': 4, 'num_inputs2': 4}]
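# Equivalence sketch (illustrative, not from the source repo; the helper name is
# hypothetical): nn.Bilinear computes out[b, o] = x1[b] @ weight[o] @ x2[b] + bias[o],
# which the einsum below reproduces for 2-D inputs.
def _check_bilinear():
    net = Classifier(4, 4)
    x1, x2 = torch.rand(8, 4), torch.rand(8, 4)
    ref = net(x1, x2)
    man = torch.einsum('bi,oij,bj->bo', x1, net.network.weight, x2) + net.network.bias
    assert torch.allclose(ref, man, atol=1e-6)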
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
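    # In-place add of the scalar bilinear bias, broadcast across all 64 output elements.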
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(
primals_4, (64, 4), (4, 1), 0), primals_1, reinterpret_tensor(
primals_3, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_1
buf1 = buf0
del buf0
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](buf2, primals_2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
return buf2, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
class ClassifierNew(nn.Module):
def __init__(self, num_inputs1, num_inputs2):
super().__init__()
self.network = nn.Bilinear(num_inputs1, num_inputs2, 1)
def forward(self, input_0, input_1):
primals_1 = self.network.weight
primals_2 = self.network.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| Project-Agni/Detection | Classifier | false | 947 | [
"MIT"
] | 0 | 6b2c8ec25f8bd2bd15995d67f2808352cec9e2af | https://github.com/Project-Agni/Detection/tree/6b2c8ec25f8bd2bd15995d67f2808352cec9e2af | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, num_inputs1, num_inputs2):
super().__init__()
self.network = nn.Bilinear(num_inputs1, num_inputs2, 1)
def forward(self, x1, x2):
return self.network(x1, x2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
DQNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/jv/cjvfpvazszqsn7k2c7ac25njk43pn5fjlaxzgkwwsgomov2lqu5x.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 24
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (24, 4), (4, 1))
assert_size_stride(primals_2, (24, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (24, 24), (24, 1))
assert_size_stride(primals_5, (24, ), (1, ))
assert_size_stride(primals_6, (4, 24), (24, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (24, 4), (4, 1))
assert_size_stride(primals_9, (24, ), (1, ))
assert_size_stride(primals_10, (24, 24), (24, 1))
assert_size_stride(primals_11, (24, ), (1, ))
assert_size_stride(primals_12, (4, 24), (24, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 24), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 24), (384, 96, 24, 1), 0); del buf0 # reuse
buf13 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf13, 1536, grid=grid(1536), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor(primals_4, (24, 24), (1, 24), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 24), (384, 96, 24, 1), 0); del buf2 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf12, 1536, grid=grid(1536), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 24), (24, 1), 0), reinterpret_tensor(primals_6, (24, 4), (1, 24), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_8, (4, 24), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 24), (384, 96, 24, 1), 0); del buf5 # reuse
buf11 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf6, primals_9, buf11, 1536, grid=grid(1536), stream=stream0)
del primals_9
buf7 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf6, (64, 24), (24, 1), 0), reinterpret_tensor(primals_10, (24, 24), (1, 24), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 24), (384, 96, 24, 1), 0); del buf7 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf8, primals_11, buf10, 1536, grid=grid(1536), stream=stream0)
del primals_11
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf8, (64, 24), (24, 1), 0), reinterpret_tensor(primals_12, (24, 4), (1, 24), 0), alpha=1, beta=1, out=buf9)
del primals_13
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor(buf3, (64, 24), (24, 1), 0), buf4, reinterpret_tensor(buf6, (64, 24), (24, 1), 0), reinterpret_tensor(buf8, (64, 24), (24, 1), 0), primals_12, buf10, primals_10, buf11, primals_8, primals_6, buf12, primals_4, buf13, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((24, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((24, 24), (24, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 24), (24, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((24, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((24, 24), (24, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 24), (24, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class Prediction(nn.Module):
"""Defines the prediction module as an ANN"""
def __init__(self, state_size: 'int', action_size: 'int', fc1: 'int'=24,
fc2: 'int'=24):
super(Prediction, self).__init__()
self.state_size = state_size
self.action_size = action_size
self.fc1 = nn.Linear(state_size, fc1)
self.fc2 = nn.Linear(fc1, fc2)
self.fc3 = nn.Linear(fc2, action_size)
def forward(self, state):
"""Prediction unit forward pass. Input state, s_t. Output, action, a_t"""
x = self.fc1(state)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
class Environment(nn.Module):
"""Defines the Environment module as an ANN"""
def __init__(self, state_size: 'int', action_size: 'int', fc1: 'int'=24,
fc2: 'int'=24):
super(Environment, self).__init__()
self.state_size = state_size
self.action_size = action_size
self.fc1 = nn.Linear(action_size, fc1)
self.fc2 = nn.Linear(fc1, fc2)
self.fc3 = nn.Linear(fc2, state_size)
def forward(self, action):
"""Environment unit forward pass. Input action, a_t. Output, s_t+1."""
x = self.fc1(action)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
class DQNetwork(nn.Module):
"""Main DQN network utilizing `Prediction` and `Environment` modules"""
def __init__(self, state_size: 'int', action_size: 'int'):
super(DQNetwork, self).__init__()
self.state_size = state_size
self.action_size = action_size
self.prediction = Prediction(state_size, action_size)
self.env = Environment(state_size, action_size)
def forward(self, state):
"""Returns a_t and s_t+1"""
action = self.prediction(state)
predicted_next_state = self.env(action)
return action, predicted_next_state
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
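# Usage sketch (illustrative, not from the source repo; the helper name is
# hypothetical): the linear layers act on the last dimension, so the action and
# the predicted next state keep the input's leading shape, with trailing dims
# action_size and state_size respectively.
def _demo_dqnetwork():
    net = DQNetwork(state_size=4, action_size=4)
    action, next_state = net(torch.rand(4, 4, 4, 4))
    assert action.shape == (4, 4, 4, 4) and next_state.shape == (4, 4, 4, 4)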
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
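    # Fused bias add + ReLU; out_ptr0 records the (activation <= 0) bool mask
    # that threshold_backward reuses in the backward pass.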
xnumel = 1536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 24
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (24, 4), (4, 1))
assert_size_stride(primals_2, (24,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (24, 24), (24, 1))
assert_size_stride(primals_5, (24,), (1,))
assert_size_stride(primals_6, (4, 24), (24, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (24, 4), (4, 1))
assert_size_stride(primals_9, (24,), (1,))
assert_size_stride(primals_10, (24, 24), (24, 1))
assert_size_stride(primals_11, (24,), (1,))
assert_size_stride(primals_12, (4, 24), (24, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 24), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 24), (384, 96, 24, 1), 0)
del buf0
buf13 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1536)](buf1,
primals_2, buf13, 1536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 24), (24, 1), 0),
reinterpret_tensor(primals_4, (24, 24), (1, 24), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 24), (384, 96, 24, 1), 0)
del buf2
buf12 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(1536)](buf3,
primals_5, buf12, 1536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 24),
(24, 1), 0), reinterpret_tensor(primals_6, (24, 4), (1, 24), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_8, (4, 24), (1,
4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 24), (384, 96, 24, 1), 0)
del buf5
buf11 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(1536)](buf6,
primals_9, buf11, 1536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf7 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf6, (64, 24), (24, 1), 0),
reinterpret_tensor(primals_10, (24, 24), (1, 24), 0), out=buf7)
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 24), (384, 96, 24, 1), 0)
del buf7
buf10 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(1536)](buf8,
primals_11, buf10, 1536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf8, (64, 24),
(24, 1), 0), reinterpret_tensor(primals_12, (24, 4), (1, 24), 0
), alpha=1, beta=1, out=buf9)
del primals_13
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor(
buf3, (64, 24), (24, 1), 0), buf4, reinterpret_tensor(buf6, (64, 24
), (24, 1), 0), reinterpret_tensor(buf8, (64, 24), (24, 1), 0),
primals_12, buf10, primals_10, buf11, primals_8, primals_6, buf12,
primals_4, buf13)
class Prediction(nn.Module):
"""Defines the prediction module as an ANN"""
def __init__(self, state_size: 'int', action_size: 'int', fc1: 'int'=24,
fc2: 'int'=24):
super(Prediction, self).__init__()
self.state_size = state_size
self.action_size = action_size
self.fc1 = nn.Linear(state_size, fc1)
self.fc2 = nn.Linear(fc1, fc2)
self.fc3 = nn.Linear(fc2, action_size)
def forward(self, state):
"""Prediction unit forward pass. Input state, s_t. Output, action, a_t"""
x = self.fc1(state)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
class Environment(nn.Module):
"""Defines the Environment module as an ANN"""
def __init__(self, state_size: 'int', action_size: 'int', fc1: 'int'=24,
fc2: 'int'=24):
super(Environment, self).__init__()
self.state_size = state_size
self.action_size = action_size
self.fc1 = nn.Linear(action_size, fc1)
self.fc2 = nn.Linear(fc1, fc2)
self.fc3 = nn.Linear(fc2, state_size)
def forward(self, action):
"""Environment unit forward pass. Input action, a_t. Output, s_t+1."""
x = self.fc1(action)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
class DQNetworkNew(nn.Module):
"""Main DQN network utilizing `Prediction` and `Environment` modules"""
def __init__(self, state_size: 'int', action_size: 'int'):
super(DQNetworkNew, self).__init__()
self.state_size = state_size
self.action_size = action_size
self.prediction = Prediction(state_size, action_size)
self.env = Environment(state_size, action_size)
def forward(self, input_0):
primals_1 = self.prediction.fc1.weight
primals_2 = self.prediction.fc1.bias
primals_4 = self.prediction.fc2.weight
primals_5 = self.prediction.fc2.bias
primals_6 = self.prediction.fc3.weight
primals_7 = self.prediction.fc3.bias
primals_8 = self.env.fc1.weight
primals_9 = self.env.fc1.bias
primals_10 = self.env.fc2.weight
primals_11 = self.env.fc2.bias
primals_12 = self.env.fc3.weight
primals_13 = self.env.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
| QasimWani/EARL | DQNetwork | false | 948 | [
"MIT"
] | 0 | 05c303335e67903380771c4954a5317bd46fc0e7 | https://github.com/QasimWani/EARL/tree/05c303335e67903380771c4954a5317bd46fc0e7 | import torch
import torch.nn.functional as F
import torch.nn as nn
class Prediction(nn.Module):
"""Defines the prediction module as an ANN"""
def __init__(self, state_size: 'int', action_size: 'int', fc1: 'int'=24,
fc2: 'int'=24):
super().__init__()
self.state_size = state_size
self.action_size = action_size
self.fc1 = nn.Linear(state_size, fc1)
self.fc2 = nn.Linear(fc1, fc2)
self.fc3 = nn.Linear(fc2, action_size)
def forward(self, state):
"""Prediction unit forward pass. Input state, s_t. Output, action, a_t"""
x = self.fc1(state)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
class Environment(nn.Module):
"""Defines the Environment module as an ANN"""
def __init__(self, state_size: 'int', action_size: 'int', fc1: 'int'=24,
fc2: 'int'=24):
super().__init__()
self.state_size = state_size
self.action_size = action_size
self.fc1 = nn.Linear(action_size, fc1)
self.fc2 = nn.Linear(fc1, fc2)
self.fc3 = nn.Linear(fc2, state_size)
def forward(self, action):
"""Environment unit forward pass. Input action, a_t. Output, s_t+1."""
x = self.fc1(action)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
class Model(nn.Module):
"""Main DQN network utilizing `Prediction` and `Environment` modules"""
def __init__(self, state_size: 'int', action_size: 'int'):
super().__init__()
self.state_size = state_size
self.action_size = action_size
self.prediction = Prediction(state_size, action_size)
self.env = Environment(state_size, action_size)
def forward(self, state):
"""Returns a_t and s_t+1"""
action = self.prediction(state)
predicted_next_state = self.env(action)
return action, predicted_next_state
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
hsigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/gl/cgljna3wfarubemgd6d2p3bgazvfhdxtrcu7luu5yza3rrfkty2s.py
# Topologically Sorted Source Nodes: [add, relu6, out], Original ATen: [aten.add, aten.hardtanh, aten.div]
# Source node to ATen node mapping:
# add => add
# out => div
# relu6 => clamp_max, clamp_min
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6), kwargs = {})
triton_poi_fused_add_div_hardtanh_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, relu6, out], Original ATen: [aten.add, aten.hardtanh, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class hsigmoid(nn.Module):
def forward(self, x):
out = F.relu6(x + 3, inplace=True) / 6
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
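# Equivalence sketch (illustrative, not from the source repo; the helper name is
# hypothetical): relu6(x + 3) / 6 is the hard-sigmoid definition, so the module
# should match F.hardsigmoid elementwise up to floating-point rounding.
def _check_hsigmoid():
    x = torch.randn(16)
    assert torch.allclose(hsigmoid()(x), F.hardsigmoid(x))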
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
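    # Elementwise hard sigmoid: clamp(x + 3, 0, 6) * (1 / 6).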
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class hsigmoidNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Qidian213/NAIC2019 | hsigmoid | false | 949 | [
"MIT"
] | 0 | 23e05a8a096168ccfa4d1743467fdf78ffcaabba | https://github.com/Qidian213/NAIC2019/tree/23e05a8a096168ccfa4d1743467fdf78ffcaabba | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def forward(self, x):
out = F.relu6(x + 3, inplace=True) / 6
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SeparableConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 4), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
return (buf2, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class SeparableConv2d(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=False):
super(SeparableConv2d, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels, in_channels, kernel_size,
stride, padding, dilation, groups=in_channels)
self.pointwise = torch.nn.Conv2d(in_channels, out_channels, 1, 1, 0,
1, 1, bias=bias)
def forward(self, x):
x = self.conv1(x)
x = self.pointwise(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
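# Parameter-count sketch (illustrative, not from the source repo; the helper
# name and default sizes are hypothetical): the depthwise separable
# factorization replaces c_in*c_out*k*k dense-conv weights with c_in*k*k
# depthwise weights (plus c_in depthwise biases) and c_in*c_out pointwise weights.
def _count_params(c_in=32, c_out=64, k=3):
    sep = SeparableConv2d(c_in, c_out, kernel_size=k, padding=1)
    dense = torch.nn.Conv2d(c_in, c_out, k, padding=1, bias=False)
    n_sep = sum(p.numel() for p in sep.parameters())
    n_dense = sum(p.numel() for p in dense.parameters())
    return n_sep, n_dense  # (2368, 18432) for these defaults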
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
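    # In-place per-channel bias add on the depthwise convolution output
    # (x1 = xindex // 16 % 4 is the channel index).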
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
return buf2, primals_1, primals_3, primals_4, buf1
class SeparableConv2dNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=False):
super(SeparableConv2dNew, self).__init__()
self.conv1 = torch.nn.Conv2d(in_channels, in_channels, kernel_size,
stride, padding, dilation, groups=in_channels)
self.pointwise = torch.nn.Conv2d(in_channels, out_channels, 1, 1, 0,
1, 1, bias=bias)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.pointwise.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| Pumpkin123709/LBEC | SeparableConv2d | false | 950 | [
"MIT"
] | 0 | 18661faa35769f731847e0226ff601754e134668 | https://github.com/Pumpkin123709/LBEC/tree/18661faa35769f731847e0226ff601754e134668 | import torch
class Model(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=False):
super().__init__()
self.conv1 = torch.nn.Conv2d(in_channels, in_channels, kernel_size,
stride, padding, dilation, groups=in_channels)
self.pointwise = torch.nn.Conv2d(in_channels, out_channels, 1, 1, 0,
1, 1, bias=bias)
def forward(self, x):
x = self.conv1(x)
x = self.pointwise(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
InputProjectionA | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/e6/ce6fwb7cuhy3qppzvzwzq3dqytlyhklktwnjhzdza6cxmtqodq25.py
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# input_1 => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [2, 2], [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/q2/cq2e75eszxxaetpgtnr5ljr5vh3tqmcqidqhrvudh2tljkbnnnyk.py
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# input_1 => avg_pool2d
# input_2 => avg_pool2d_1
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [2, 2], [2, 2]), kwargs = {})
# %avg_pool2d_1 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%avg_pool2d, [2, 2], [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xg/cxgmndo32qigjql7dqcxlvgfdglqloagidx7i4d2d5pogp36nr6z.py
# Topologically Sorted Source Nodes: [input_1, input_2, input_3], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# input_1 => avg_pool2d
# input_2 => avg_pool2d_1
# input_3 => avg_pool2d_2
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [2, 2], [2, 2]), kwargs = {})
# %avg_pool2d_1 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%avg_pool2d, [2, 2], [2, 2]), kwargs = {})
# %avg_pool2d_2 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%avg_pool2d_1, [2, 2], [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_2 = async_compile.triton('triton_poi_fused_avg_pool2d_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/bx/cbx63wyeqi7olz44znwpy535ph7u6xevmlpktla6ppx4ukf6ccfn.py
# Topologically Sorted Source Nodes: [input_1, input_2, input_3, input_4], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# input_1 => avg_pool2d
# input_2 => avg_pool2d_1
# input_3 => avg_pool2d_2
# input_4 => avg_pool2d_3
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [2, 2], [2, 2]), kwargs = {})
# %avg_pool2d_1 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%avg_pool2d, [2, 2], [2, 2]), kwargs = {})
# %avg_pool2d_2 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%avg_pool2d_1, [2, 2], [2, 2]), kwargs = {})
# %avg_pool2d_3 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%avg_pool2d_2, [2, 2], [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_3 = async_compile.triton('triton_poi_fused_avg_pool2d_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16384, grid=grid(16384), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_1.run(buf0, buf1, 4096, grid=grid(4096), stream=stream0)
del buf0
buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1, input_2, input_3], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_2.run(buf1, buf2, 1024, grid=grid(1024), stream=stream0)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1, input_2, input_3, input_4], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_3.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class InputProjectionA(nn.Module):
"""
This class projects the input image to the same spatial dimensions as the feature map.
    For example, if the input image is 512 x 512 x 3 and the feature map has spatial dimensions 56 x 56 x F,
    then this class will generate an output of 56 x 56 x 3.
"""
def __init__(self, samplingTimes):
"""
        :param samplingTimes: The number of 2x down-sampling steps applied to the image
"""
super().__init__()
self.pool = nn.ModuleList()
for i in range(0, samplingTimes):
self.pool.append(nn.AvgPool2d(2, stride=2, padding=0))
def forward(self, input):
"""
:param input: Input RGB Image
:return: down-sampled image (pyramid-based approach)
"""
for pool in self.pool:
input = pool(input)
return input
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'samplingTimes': 4}]
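# Illustrative usage sketch (not part of the original file; `_example_input_projection` is a hypothetical helper):
def _example_input_projection():
    proj = InputProjectionA(samplingTimes=4)
    y = proj(torch.rand(4, 4, 64, 64))  # four AvgPool2d(2, stride=2) stages halve H and W each time
    return y.shape  # torch.Size([4, 4, 4, 4])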
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
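    # First 2x2 average pool (64x64 -> 32x32): each lane loads its four window elements and scales the sum by 0.25.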
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_avg_pool2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16384)](arg0_1, buf0, 16384,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_poi_fused_avg_pool2d_1[grid(4096)](buf0, buf1, 4096, XBLOCK=
128, num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
triton_poi_fused_avg_pool2d_2[grid(1024)](buf1, buf2, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_avg_pool2d_3[grid(256)](buf2, buf3, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf2
return buf3,
class InputProjectionANew(nn.Module):
"""
This class projects the input image to the same spatial dimensions as the feature map.
    For example, if the input image is 512 x 512 x 3 and the feature map has spatial dimensions 56 x 56 x F,
    then this class will generate an output of 56 x 56 x 3.
"""
def __init__(self, samplingTimes):
"""
        :param samplingTimes: The number of 2x down-sampling steps applied to the image
"""
super().__init__()
self.pool = nn.ModuleList()
for i in range(0, samplingTimes):
self.pool.append(nn.AvgPool2d(2, stride=2, padding=0))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| PhillipHuang2017/ext_portrait_segmentation | InputProjectionA | false | 951 | [
"MIT"
] | 0 | 6d0cec0a953dacbc94a01ea8b719feb687b7c029 | https://github.com/PhillipHuang2017/ext_portrait_segmentation/tree/6d0cec0a953dacbc94a01ea8b719feb687b7c029 | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class Model(nn.Module):
"""
This class projects the input image to the same spatial dimensions as the feature map.
    For example, if the input image is 512 x 512 x 3 and the feature map has spatial dimensions 56 x 56 x F,
    then this class will generate an output of 56 x 56 x 3.
"""
def __init__(self, samplingTimes):
"""
        :param samplingTimes: The number of 2x down-sampling steps applied to the image
"""
super().__init__()
self.pool = nn.ModuleList()
for i in range(0, samplingTimes):
self.pool.append(nn.AvgPool2d(2, stride=2, padding=0))
def forward(self, input):
"""
:param input: Input RGB Image
:return: down-sampled image (pyramid-based approach)
"""
for pool in self.pool:
input = pool(input)
return input
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [4]
|
SelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/sr/csrg6irduolxnaubd5v3tlh5eeuhw27sxkg3o56t4veh47sq6ce3.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 2
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ko/ckow7ci7f3mygm6ujdzdisip6tet25h4hj6uestesqalhkarwrrw.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/w5/cw5gytijzzkwnfpq2a2axdsj4pfxgxmwiuzizuyd4bw5uwnanzw7.py
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ry/cryr7ctvhiha3ewxbbo6dgnuga42vxmuwqebgb2reyd6dilprrd4.py
# Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# out_2 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_8, %view_3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
triton_poi_fused_mul_3 = async_compile.triton('triton_poi_fused_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 * tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (2, ), (1, ))
assert_size_stride(primals_4, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (2, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 4, 4), (32, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 128, grid=grid(128), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 2, 4, 4), (32, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf3, primals_5, 128, grid=grid(128), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 2), (32, 1, 16), 0), reinterpret_tensor(buf3, (4, 2, 16), (32, 16, 1), 0), out=buf4)
buf7 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf4, buf7, 64, 16, grid=grid(64), stream=stream0)
del buf4
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf9, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf9, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf7, (4, 16, 16), (256, 1, 16), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul]
triton_poi_fused_mul_3.run(primals_8, buf10, primals_1, buf11, 256, grid=grid(256), stream=stream0)
return (buf11, buf7, primals_1, primals_2, primals_4, primals_6, primals_8, buf7, buf10, reinterpret_tensor(buf9, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf1, (4, 2, 16), (32, 16, 1), 0), reinterpret_tensor(buf3, (4, 16, 2), (32, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class SelfAttention(nn.Module):
""" Self attention Layer"""
def __init__(self, in_dim, activation):
super(SelfAttention, self).__init__()
self.chanel_in = in_dim
self.activation = activation
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //
2, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //
2, kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim,
kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
"""
inputs :
                x : input feature maps (B X C X W X H)
returns :
                out : gamma-scaled self-attention map multiplied elementwise with the input feature
attention: B X N X N (N is Width*Height)
"""
m_batchsize, C, width, height = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width * height
).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width * height)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(m_batchsize, -1, width * height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, width, height)
out = self.gamma * out * x
return out, attention
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'activation': 4}]
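# Illustrative usage sketch (not part of the original file; `_example_self_attention` is a hypothetical helper,
# and `activation` is stored by __init__ but never used in forward):
def _example_self_attention():
    attn = SelfAttention(in_dim=4, activation=None)
    out, attention = attn(torch.rand(4, 4, 4, 4))
    return out.shape, attention.shape  # (4, 4, 4, 4) and (4, 16, 16), where N = W * H = 16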
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 2
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
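    # Numerically stable row-wise softmax over 16 attention logits: subtract the row max, exponentiate, normalize.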
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
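    # Elementwise out = gamma * attn_out * x; in_ptr0 holds the single-element gamma parameter.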
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 * tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (2,), (1,))
assert_size_stride(primals_4, (2, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (2,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 4, 4), (32, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(128)](buf1, primals_3, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 2, 4, 4), (32, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_0[grid(128)](buf3, primals_5, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
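        # energy = proj_query @ proj_key: batched (16 x 2) @ (2 x 16) -> one 16 x 16 logit matrix per sample.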
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 16, 2), (32, 1, 16),
0), reinterpret_tensor(buf3, (4, 2, 16), (32, 16, 1), 0), out=buf4)
buf7 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf4
buf8 = extern_kernels.convolution(primals_1, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_2[grid(256)](buf9, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf9, (4, 4, 16), (64, 16, 1),
0), reinterpret_tensor(buf7, (4, 16, 16), (256, 1, 16), 0), out
=buf10)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_3[grid(256)](primals_8, buf10, primals_1,
buf11, 256, XBLOCK=256, num_warps=4, num_stages=1)
return (buf11, buf7, primals_1, primals_2, primals_4, primals_6,
primals_8, buf7, buf10, reinterpret_tensor(buf9, (4, 16, 4), (64, 1,
16), 0), reinterpret_tensor(buf1, (4, 2, 16), (32, 16, 1), 0),
reinterpret_tensor(buf3, (4, 16, 2), (32, 1, 16), 0))
class SelfAttentionNew(nn.Module):
""" Self attention Layer"""
def __init__(self, in_dim, activation):
super(SelfAttentionNew, self).__init__()
self.chanel_in = in_dim
self.activation = activation
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //
2, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //
2, kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim,
kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_0):
primals_8 = self.gamma
primals_2 = self.query_conv.weight
primals_3 = self.query_conv.bias
primals_4 = self.key_conv.weight
primals_5 = self.key_conv.bias
primals_6 = self.value_conv.weight
primals_7 = self.value_conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
| PauPerezT/EmoSSpeech | SelfAttention | false | 952 | [
"Apache-2.0"
] | 0 | 168a951a838d0bfb838e7d0e3f6895bff68164a4 | https://github.com/PauPerezT/EmoSSpeech/tree/168a951a838d0bfb838e7d0e3f6895bff68164a4 | import torch
from torch import nn
class Model(nn.Module):
""" Self attention Layer"""
def __init__(self, in_dim, activation):
super().__init__()
self.chanel_in = in_dim
self.activation = activation
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //
2, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim //
2, kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim,
kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
"""
inputs :
                x : input feature maps (B X C X W X H)
returns :
                out : gamma-scaled self-attention map multiplied elementwise with the input feature
attention: B X N X N (N is Width*Height)
"""
m_batchsize, C, width, height = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width * height
).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width * height)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(m_batchsize, -1, width * height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, width, height)
out = self.gamma * out * x
return out, attention
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Environment | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/jv/cjvfpvazszqsn7k2c7ac25njk43pn5fjlaxzgkwwsgomov2lqu5x.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 24
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (24, 4), (4, 1))
assert_size_stride(primals_2, (24, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (24, 24), (24, 1))
assert_size_stride(primals_5, (24, ), (1, ))
assert_size_stride(primals_6, (4, 24), (24, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 24), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 24), (384, 96, 24, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 1536, grid=grid(1536), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor(primals_4, (24, 24), (1, 24), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 24), (384, 96, 24, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 1536, grid=grid(1536), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 24), (24, 1), 0), reinterpret_tensor(primals_6, (24, 4), (1, 24), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor(buf3, (64, 24), (24, 1), 0), primals_6, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((24, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((24, 24), (24, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 24), (24, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class Environment(nn.Module):
"""Defines the Environment module as an ANN"""
def __init__(self, state_size: 'int', action_size: 'int', fc1: 'int'=24,
fc2: 'int'=24):
super(Environment, self).__init__()
self.state_size = state_size
self.action_size = action_size
self.fc1 = nn.Linear(action_size, fc1)
self.fc2 = nn.Linear(fc1, fc2)
self.fc3 = nn.Linear(fc2, state_size)
def forward(self, action):
"""Environment unit forward pass. Input action, a_t. Output, s_t+1."""
x = self.fc1(action)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4}]
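# Illustrative usage sketch (not part of the original file; `_example_environment_step` is a hypothetical helper):
def _example_environment_step():
    env = Environment(state_size=4, action_size=4)
    next_state = env(torch.rand(4, 4, 4, 4))  # nn.Linear acts on the last dim: 4 -> 24 -> 24 -> 4
    return next_state.shape  # torch.Size([4, 4, 4, 4])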
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
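    # Fused bias-add + ReLU; also stores the (activation <= 0) mask that autograd's threshold_backward reuses.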
xnumel = 1536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 24
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (24, 4), (4, 1))
assert_size_stride(primals_2, (24,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (24, 24), (24, 1))
assert_size_stride(primals_5, (24,), (1,))
assert_size_stride(primals_6, (4, 24), (24, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 24), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 24), (384, 96, 24, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1536)](buf1,
primals_2, buf6, 1536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 24), (24, 1), 0),
reinterpret_tensor(primals_4, (24, 24), (1, 24), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 24), (384, 96, 24, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(1536)](buf3,
primals_5, buf5, 1536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 24),
(24, 1), 0), reinterpret_tensor(primals_6, (24, 4), (1, 24), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor(
buf3, (64, 24), (24, 1), 0), primals_6, buf5, primals_4, buf6
class EnvironmentNew(nn.Module):
"""Defines the Environment module as an ANN"""
def __init__(self, state_size: 'int', action_size: 'int', fc1: 'int'=24,
fc2: 'int'=24):
super(EnvironmentNew, self).__init__()
self.state_size = state_size
self.action_size = action_size
self.fc1 = nn.Linear(action_size, fc1)
self.fc2 = nn.Linear(fc1, fc2)
self.fc3 = nn.Linear(fc2, state_size)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| QasimWani/EARL | Environment | false | 953 | [
"MIT"
] | 0 | 05c303335e67903380771c4954a5317bd46fc0e7 | https://github.com/QasimWani/EARL/tree/05c303335e67903380771c4954a5317bd46fc0e7 | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
"""Defines the Environment module as an ANN"""
def __init__(self, state_size: 'int', action_size: 'int', fc1: 'int'=24,
fc2: 'int'=24):
super().__init__()
self.state_size = state_size
self.action_size = action_size
self.fc1 = nn.Linear(action_size, fc1)
self.fc2 = nn.Linear(fc1, fc2)
self.fc3 = nn.Linear(fc2, state_size)
def forward(self, action):
"""Environment unit forward pass. Input action, a_t. Output, s_t+1."""
x = self.fc1(action)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
attention2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
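    # Global average pooling: each (batch, channel) pair reduces its 4x4 spatial map (16 values) to a single mean.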
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/wu/cwuhjqrxupg6y2xkpm7lucxrqveop2y2vttpjm35nj72mklqdbod.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_2 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/cf/ccf6eqh4cdg7oqni4fhofy7qpwwcxmm2ax3u7y2hcrla7ibifzai.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
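    # Softmax stage 1 with temperature-style scaling: subtract the row max, multiply by 0.25 (i.e. divide by 4), exponentiate.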
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/qs/cqsyda2m63ct5ijcfgcipyyfn273chi5d3kmpjuf5asa7h4wdpdv.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
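# Note: pass 2 of the softmax. Each exponentiated value is divided by its
# row sum, so every 4-element row of the output sums to 1.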
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 16, 1, 1), (16, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 1, 1), (16, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
del buf5
return (buf6, primals_2, primals_3, buf1, buf3, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class attention2d(nn.Module):
def __init__(self, in_planes, ratios, K, temperature, init_weight=True):
super(attention2d, self).__init__()
assert temperature % 3 == 1
self.avgpool = nn.AdaptiveAvgPool2d(1)
if in_planes != 3:
hidden_planes = int(in_planes * ratios)
else:
hidden_planes = K
self.fc1 = nn.Conv2d(in_planes, hidden_planes, 1, bias=False)
self.fc2 = nn.Conv2d(hidden_planes, K, 1, bias=False)
self.temperature = temperature
if init_weight:
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='leaky_relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def updata_temperature(self):
if self.temperature != 1:
self.temperature -= 1
def forward(self, x):
x = self.avgpool(x)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x).view(x.size(0), -1)
return F.softmax(x / self.temperature, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'ratios': 4, 'K': 4, 'temperature': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
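# The kernel above implements AdaptiveAvgPool2d(1): it sums the 16 spatial
# elements of each (batch, channel) plane and divides by 16.0.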
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
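# In-place ReLU: max(0, x) written back to the same buffer.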
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (16, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 16, 1, 1), (16, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 16, 1, 1), (16, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(64)](buf3, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0)
del buf4
triton_poi_fused__softmax_3[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf5
return buf6, primals_2, primals_3, buf1, buf3, buf6
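# The call() above replays attention2d.forward as a fixed pipeline:
# adaptive avg-pool (triton mean) -> fc1 1x1 conv -> in-place ReLU ->
# fc2 1x1 conv -> two-pass softmax. The extra returned tensors are the
# intermediates Inductor saves for the backward pass.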
class attention2dNew(nn.Module):
def __init__(self, in_planes, ratios, K, temperature, init_weight=True):
super(attention2dNew, self).__init__()
assert temperature % 3 == 1
self.avgpool = nn.AdaptiveAvgPool2d(1)
if in_planes != 3:
hidden_planes = int(in_planes * ratios)
else:
hidden_planes = K
self.fc1 = nn.Conv2d(in_planes, hidden_planes, 1, bias=False)
self.fc2 = nn.Conv2d(hidden_planes, K, 1, bias=False)
self.temperature = temperature
if init_weight:
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='leaky_relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def updata_temperature(self):
if self.temperature != 1:
self.temperature -= 1
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
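# Minimal usage sketch (an assumption for illustration; requires a CUDA
# device, and shapes mirror get_inputs/get_init_inputs above):
#   model = attention2dNew(in_planes=4, ratios=4, K=4, temperature=4).cuda()
#   weights = model(torch.rand(4, 4, 4, 4, device='cuda'))  # (4, 4), rows sum to 1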
| PengJingchao/DFNet | attention2d | false | 954 | [
"MIT"
] | 0 | 49e83501f81515aebca211351e315896da7afc54 | https://github.com/PengJingchao/DFNet/tree/49e83501f81515aebca211351e315896da7afc54 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_planes, ratios, K, temperature, init_weight=True):
super().__init__()
assert temperature % 3 == 1
self.avgpool = nn.AdaptiveAvgPool2d(1)
if in_planes != 3:
hidden_planes = int(in_planes * ratios)
else:
hidden_planes = K
self.fc1 = nn.Conv2d(in_planes, hidden_planes, 1, bias=False)
self.fc2 = nn.Conv2d(hidden_planes, K, 1, bias=False)
self.temperature = temperature
if init_weight:
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='leaky_relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def updata_temperature(self):
if self.temperature != 1:
self.temperature -= 1
def forward(self, x):
x = self.avgpool(x)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x).view(x.size(0), -1)
return F.softmax(x / self.temperature, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 4]
|
ShearY | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/64/c64wcolc5y2n37sxtnpnhr2dzw4gq7giy4xyi4ub5z3kplhqckpr.py
# Topologically Sorted Source Nodes: [fill_], Original ATen: [aten.fill]
# Source node to ATen node mapping:
# fill_ => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %select_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %full_default, 3, 2), kwargs = {})
triton_poi_fused_fill_0 = async_compile.triton('triton_poi_fused_fill_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x2 = (xindex // 12)
x1 = (xindex // 3) % 4
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 1, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = x2
tmp6 = tmp5.to(tl.float32)
tmp7 = 2.0
tmp8 = tmp6 < tmp7
tmp9 = 1.0
tmp10 = tmp6 * tmp9
tmp11 = -1.5
tmp12 = tmp10 + tmp11
tmp13 = 3 + ((-1)*x2)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 * tmp9
tmp16 = 1.5
tmp17 = tmp16 - tmp15
tmp18 = tl.where(tmp8, tmp12, tmp17)
tmp19 = tl.full([1], 0, tl.int32)
tmp20 = tmp0 == tmp19
tmp21 = x1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp22 < tmp7
tmp24 = tmp22 * tmp9
tmp25 = tmp24 + tmp11
tmp26 = 3 + ((-1)*x1)
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp27 * tmp9
tmp29 = tmp16 - tmp28
tmp30 = tl.where(tmp23, tmp25, tmp29)
tmp31 = float("nan")
tmp32 = tl.where(tmp20, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp18, tmp32)
tmp34 = tl.where(tmp2, tmp9, tmp33)
tl.store(out_ptr0 + (x4), tmp34, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/rd/crdafhjrsyfl64e6v5l4z7fblhdi2w6wskuhi24d4cbgme3tccoz.py
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta], Original ATen: [aten.lift_fresh, aten.div]
# Source node to ATen node mapping:
# rescaled_theta => div
# tensor_1 => lift_fresh_copy_1
# Graph fragment:
# %lift_fresh_copy_1 : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%_tensor_constant1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute, %lift_fresh_copy_1), kwargs = {})
triton_poi_fused_div_lift_fresh_1 = async_compile.triton('triton_poi_fused_div_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_lift_fresh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_lift_fresh_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x1 + (3*x0)
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp7)
tmp9 = 1.0
tmp10 = tl.where(tmp4, tmp9, tmp8)
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.full([1], 5, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.where(tmp14, tmp9, tmp7)
tmp16 = -0.737263560295105
tmp17 = tl.where(tmp12, tmp16, tmp15)
tmp18 = tl.where(tmp2, tmp10, tmp17)
tmp19 = x0
tmp20 = tmp19 < tmp3
tmp21 = 2.0
tmp22 = tl.where(tmp20, tmp21, tmp21)
tmp23 = tmp18 / tmp22
tl.store(out_ptr0 + (x2), tmp23, xmask)
''', device_str='cuda')
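# Note: the hard-coded -0.737263560295105 is tan(angle) baked in at trace
# time; with M=4 the module computes angle = 359 / 10 * 4 - 180 = -36.4
# degrees, and tan(-36.4 deg) ~= -0.7373.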
# kernel path: runs/run_shard_6/inductor_cache/zf/czfnj57ejqlqojplqt6piz7sosofcvzqdxhskwatgn32njr7acz7.py
# Topologically Sorted Source Nodes: [img], Original ATen: [aten.grid_sampler_2d]
# Source node to ATen node mapping:
# img => add_2, add_3, full_default_3, full_default_4, ge, ge_1, index, logical_and, logical_and_1, logical_and_2, lt_2, lt_3, mul_4, mul_5, mul_6, round_1, round_2, where_4
# Graph fragment:
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, 2.0), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 1.5), kwargs = {})
# %round_1 : [num_users=3] = call_function[target=torch.ops.aten.round.default](args = (%add_2,), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%round_1, 0), kwargs = {})
# %lt_2 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%round_1, 4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_9, 2.0), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 1.5), kwargs = {})
# %round_2 : [num_users=3] = call_function[target=torch.ops.aten.round.default](args = (%add_3,), kwargs = {})
# %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%round_2, 0), kwargs = {})
# %lt_3 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%round_2, 4), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_1, %lt_3), kwargs = {})
# %logical_and_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_2, %logical_and), kwargs = {})
# %logical_and_2 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %logical_and_1), kwargs = {})
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [%view_5, %view_6, %where_3, %where_2]), kwargs = {})
# %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_2, %full_default_4, %full_default_3), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %where_4), kwargs = {})
triton_poi_fused_grid_sampler_2d_2 = async_compile.triton('triton_poi_fused_grid_sampler_2d_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_grid_sampler_2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_grid_sampler_2d_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.5
tmp4 = tmp2 + tmp3
tmp5 = libdevice.nearbyint(tmp4)
tmp6 = 0.0
tmp7 = tmp5 >= tmp6
tmp8 = 4.0
tmp9 = tmp5 < tmp8
tmp11 = tmp10 * tmp1
tmp12 = tmp11 + tmp3
tmp13 = libdevice.nearbyint(tmp12)
tmp14 = tmp13 >= tmp6
tmp15 = tmp13 < tmp8
tmp16 = tmp14 & tmp15
tmp17 = tmp9 & tmp16
tmp18 = tmp7 & tmp17
tmp19 = tmp13.to(tl.int64)
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.where(tmp18, tmp19, tmp20)
tmp22 = tl.full([XBLOCK], 4, tl.int32)
tmp23 = tmp21 + tmp22
tmp24 = tmp21 < 0
tmp25 = tl.where(tmp24, tmp23, tmp21)
tl.device_assert(((0 <= tmp25) & (tmp25 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp25 < 4")
tmp27 = tmp5.to(tl.int64)
tmp28 = tl.where(tmp18, tmp27, tmp20)
tmp29 = tmp28 + tmp22
tmp30 = tmp28 < 0
tmp31 = tl.where(tmp30, tmp29, tmp28)
tl.device_assert(((0 <= tmp31) & (tmp31 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp31 < 4")
tmp33 = tl.load(in_ptr1 + (tmp31 + (4*tmp25) + (16*x3)), xmask, eviction_policy='evict_last')
tmp34 = tl.full([1], 1, tl.int64)
tmp35 = tl.where(tmp18, tmp34, tmp20)
tmp36 = tmp35.to(tl.float32)
tmp37 = tmp33 * tmp36
tl.store(out_ptr0 + (x4), tmp37, xmask)
''', device_str='cuda')
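# Note: this realizes grid_sample with mode='nearest': the unnormalized
# coordinates are rounded (nearbyint), and samples that fall outside the
# 4x4 image are masked to zero via the where/multiply at the end.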
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((1, 4, 4, 3), (48, 12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [fill_], Original ATen: [aten.fill]
stream0 = get_raw_stream(0)
triton_poi_fused_fill_0.run(buf2, 48, grid=grid(48), stream=stream0)
buf3 = empty_strided_cuda((1, 3, 2), (6, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta], Original ATen: [aten.lift_fresh, aten.div]
triton_poi_fused_div_lift_fresh_1.run(buf3, 6, grid=grid(6), stream=stream0)
buf4 = empty_strided_cuda((1, 16, 2), (32, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta, output_grid], Original ATen: [aten.lift_fresh, aten.div, aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 3), (48, 3, 1), 0), buf3, out=buf4)
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [img], Original ATen: [aten.grid_sampler_2d]
triton_poi_fused_grid_sampler_2d_2.run(buf4, arg0_1, buf5, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf4
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torchvision import transforms as ttf
class ShearY(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M - 180
def forward(self, img):
return ttf.functional.affine(img, 0, [0, 0], 1, [0, self.angle])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'M': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_fill_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x2 = xindex // 12
x1 = xindex // 3 % 4
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 1, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = x2
tmp6 = tmp5.to(tl.float32)
tmp7 = 2.0
tmp8 = tmp6 < tmp7
tmp9 = 1.0
tmp10 = tmp6 * tmp9
tmp11 = -1.5
tmp12 = tmp10 + tmp11
tmp13 = 3 + -1 * x2
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 * tmp9
tmp16 = 1.5
tmp17 = tmp16 - tmp15
tmp18 = tl.where(tmp8, tmp12, tmp17)
tmp19 = tl.full([1], 0, tl.int32)
tmp20 = tmp0 == tmp19
tmp21 = x1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp22 < tmp7
tmp24 = tmp22 * tmp9
tmp25 = tmp24 + tmp11
tmp26 = 3 + -1 * x1
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp27 * tmp9
tmp29 = tmp16 - tmp28
tmp30 = tl.where(tmp23, tmp25, tmp29)
tmp31 = float('nan')
tmp32 = tl.where(tmp20, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp18, tmp32)
tmp34 = tl.where(tmp2, tmp9, tmp33)
tl.store(out_ptr0 + x4, tmp34, xmask)
@triton.jit
def triton_poi_fused_div_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x1 + 3 * x0
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp7)
tmp9 = 1.0
tmp10 = tl.where(tmp4, tmp9, tmp8)
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.full([1], 5, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.where(tmp14, tmp9, tmp7)
tmp16 = -0.737263560295105
tmp17 = tl.where(tmp12, tmp16, tmp15)
tmp18 = tl.where(tmp2, tmp10, tmp17)
tmp19 = x0
tmp20 = tmp19 < tmp3
tmp21 = 2.0
tmp22 = tl.where(tmp20, tmp21, tmp21)
tmp23 = tmp18 / tmp22
tl.store(out_ptr0 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_grid_sampler_2d_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.5
tmp4 = tmp2 + tmp3
tmp5 = libdevice.nearbyint(tmp4)
tmp6 = 0.0
tmp7 = tmp5 >= tmp6
tmp8 = 4.0
tmp9 = tmp5 < tmp8
tmp11 = tmp10 * tmp1
tmp12 = tmp11 + tmp3
tmp13 = libdevice.nearbyint(tmp12)
tmp14 = tmp13 >= tmp6
tmp15 = tmp13 < tmp8
tmp16 = tmp14 & tmp15
tmp17 = tmp9 & tmp16
tmp18 = tmp7 & tmp17
tmp19 = tmp13.to(tl.int64)
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.where(tmp18, tmp19, tmp20)
tmp22 = tl.full([XBLOCK], 4, tl.int32)
tmp23 = tmp21 + tmp22
tmp24 = tmp21 < 0
tmp25 = tl.where(tmp24, tmp23, tmp21)
tl.device_assert((0 <= tmp25) & (tmp25 < 4) | ~xmask,
'index out of bounds: 0 <= tmp25 < 4')
tmp27 = tmp5.to(tl.int64)
tmp28 = tl.where(tmp18, tmp27, tmp20)
tmp29 = tmp28 + tmp22
tmp30 = tmp28 < 0
tmp31 = tl.where(tmp30, tmp29, tmp28)
tl.device_assert((0 <= tmp31) & (tmp31 < 4) | ~xmask,
'index out of bounds: 0 <= tmp31 < 4')
tmp33 = tl.load(in_ptr1 + (tmp31 + 4 * tmp25 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp34 = tl.full([1], 1, tl.int64)
tmp35 = tl.where(tmp18, tmp34, tmp20)
tmp36 = tmp35.to(tl.float32)
tmp37 = tmp33 * tmp36
tl.store(out_ptr0 + x4, tmp37, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((1, 4, 4, 3), (48, 12, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_fill_0[grid(48)](buf2, 48, XBLOCK=64, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((1, 3, 2), (6, 2, 1), torch.float32)
triton_poi_fused_div_lift_fresh_1[grid(6)](buf3, 6, XBLOCK=8,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((1, 16, 2), (32, 2, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 3), (48, 3, 1),
0), buf3, out=buf4)
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_grid_sampler_2d_2[grid(256)](buf4, arg0_1, buf5,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf4
return buf5,
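# The call() above replays the traced affine transform: build the
# homogeneous base grid (fill kernel), form the constant rescaled theta
# (div kernel), bmm them into a sampling grid, then nearest-neighbor
# grid-sample the input.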
class ShearYNew(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M - 180
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
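# Minimal usage sketch (an assumption for illustration; requires a CUDA
# device, and M=4 is already baked into the traced kernels):
#   m = ShearYNew(M=4).cuda()
#   out = m(torch.rand(4, 4, 4, 4, device='cuda'))  # same shape as the input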
| Hayoung93/UDA | ShearY | false | 955 | [
"Apache-2.0"
] | 0 | a587b01c76141d64e7cead55b62e0f3ed75890bf | https://github.com/Hayoung93/UDA/tree/a587b01c76141d64e7cead55b62e0f3ed75890bf | import torch
import torch.nn as nn
from torchvision import transforms as ttf
class Model(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M - 180
def forward(self, img):
return ttf.functional.affine(img, 0, [0, 0], 1, [0, self.angle])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
fully_conv_layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/zo/czobpmlyr5atbcpsuque6vcmk7nafmb3smtbzoqilz46drm7zbkm.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
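# Note: the 1x1 convolution itself runs through extern_kernels.convolution;
# this kernel only broadcasts the single-element bias and adds it in place.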
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class fully_conv_layer(nn.Module):
def __init__(self, c):
super(fully_conv_layer, self).__init__()
self.conv = nn.Conv2d(c, 1, 1)
def forward(self, x):
return self.conv(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class fully_conv_layerNew(nn.Module):
def __init__(self, c):
super(fully_conv_layerNew, self).__init__()
self.conv = nn.Conv2d(c, 1, 1)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
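# Minimal usage sketch (an assumption for illustration; requires a CUDA device):
#   layer = fully_conv_layerNew(c=4).cuda()
#   out = layer(torch.rand(4, 4, 4, 4, device='cuda'))  # (4, 1, 4, 4)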
| QiweiMa-LL/STAGCN | fully_conv_layer | false | 956 | [
"MIT"
] | 0 | c6889c845ac7fcba4419b2727022a599981f2a54 | https://github.com/QiweiMa-LL/STAGCN/tree/c6889c845ac7fcba4419b2727022a599981f2a54 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, c):
super().__init__()
self.conv = nn.Conv2d(c, 1, 1)
def forward(self, x):
return self.conv(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
HSwish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/jj/cjjcpa4jfom3kmx4ufnxtda3bmq466cpemkegyhzep2ymmlsg35l.py
# Topologically Sorted Source Nodes: [add, relu6, mul, truediv], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# mul => mul
# relu6 => clamp_max, clamp_min
# truediv => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %clamp_max), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 6.0), kwargs = {})
triton_poi_fused_add_div_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
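# Note: a single fused elementwise pass for hard-swish,
# x * clamp(x + 3, 0, 6) / 6, with the division pre-folded as a multiply
# by 0.16666666666666666.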
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, relu6, mul, truediv], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional
import torch.optim
import torch.nn.parallel
import torch.utils.data.distributed
class HSwish(nn.Module):
""" Applies the Hard-Swish function element-wise.
`"Searching for MobileNetV3" <https://arxiv.org/pdf/1905.02244.pdf>`_
Examples:
    >>> m = HSwish()
>>> x = torch.randn(2)
>>> output = m(x)
"""
@staticmethod
def forward(x: 'torch.Tensor') ->torch.Tensor:
return x * F.relu6(x + 3, inplace=True) / 6.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.functional
import torch.optim
import torch.nn.parallel
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HSwishNew(nn.Module):
""" Applies the Hard-Swish function element-wise.
`"Searching for MobileNetV3" <https://arxiv.org/pdf/1905.02244.pdf>`_
Examples:
    >>> m = HSwishNew()
>>> x = torch.randn(2)
>>> output = m(x)
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
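# Minimal usage sketch (an assumption for illustration; requires a CUDA device):
#   act = HSwishNew()
#   y = act(torch.randn(4, 4, 4, 4, device='cuda'))  # elementwise hard-swish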
| PhelaPoscam/SRGAN-PyTorch | HSwish | false | 957 | [
"Apache-2.0"
] | 0 | c1c68707dbddd1130b2ea71023df748080bcbd52 | https://github.com/PhelaPoscam/SRGAN-PyTorch/tree/c1c68707dbddd1130b2ea71023df748080bcbd52 | import torch
import torch.utils.data
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.functional
import torch.optim
import torch.nn.parallel
import torch.utils.data.distributed
class Model(nn.Module):
""" Applies the Hard-Swish function element-wise.
`"Searching for MobileNetV3" <https://arxiv.org/pdf/1905.02244.pdf>`_
Examples:
    >>> m = Model()
>>> x = torch.randn(2)
>>> output = m(x)
"""
@staticmethod
def forward(x: 'torch.Tensor') ->torch.Tensor:
return x * F.relu6(x + 3, inplace=True) / 6.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
TranslateX | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/64/c64wcolc5y2n37sxtnpnhr2dzw4gq7giy4xyi4ub5z3kplhqckpr.py
# Topologically Sorted Source Nodes: [fill_], Original ATen: [aten.fill]
# Source node to ATen node mapping:
# fill_ => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %select_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %full_default, 3, 2), kwargs = {})
triton_poi_fused_fill_0 = async_compile.triton('triton_poi_fused_fill_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x2 = (xindex // 12)
x1 = (xindex // 3) % 4
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 1, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = x2
tmp6 = tmp5.to(tl.float32)
tmp7 = 2.0
tmp8 = tmp6 < tmp7
tmp9 = 1.0
tmp10 = tmp6 * tmp9
tmp11 = -1.5
tmp12 = tmp10 + tmp11
tmp13 = 3 + ((-1)*x2)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 * tmp9
tmp16 = 1.5
tmp17 = tmp16 - tmp15
tmp18 = tl.where(tmp8, tmp12, tmp17)
tmp19 = tl.full([1], 0, tl.int32)
tmp20 = tmp0 == tmp19
tmp21 = x1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp22 < tmp7
tmp24 = tmp22 * tmp9
tmp25 = tmp24 + tmp11
tmp26 = 3 + ((-1)*x1)
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp27 * tmp9
tmp29 = tmp16 - tmp28
tmp30 = tl.where(tmp23, tmp25, tmp29)
tmp31 = float("nan")
tmp32 = tl.where(tmp20, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp18, tmp32)
tmp34 = tl.where(tmp2, tmp9, tmp33)
tl.store(out_ptr0 + (x4), tmp34, xmask)
''', device_str='cuda')
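# Note: identical base-grid construction to the ShearY entry above: each
# output pixel gets homogeneous coordinates (x, y, 1) for the affine bmm.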
# kernel path: runs/run_shard_6/inductor_cache/ks/cksu4qufrzd37pfs4xbfk5etqfdciowiraecduqaxu3kwb4zlzgg.py
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta], Original ATen: [aten.lift_fresh, aten.div]
# Source node to ATen node mapping:
# rescaled_theta => div
# tensor_1 => lift_fresh_copy_1
# Graph fragment:
# %lift_fresh_copy_1 : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%_tensor_constant1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute, %lift_fresh_copy_1), kwargs = {})
triton_poi_fused_div_lift_fresh_1 = async_compile.triton('triton_poi_fused_div_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_lift_fresh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_lift_fresh_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x1 + (3*x0)
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = 0.0
tmp8 = -1.2000000476837158
tmp9 = tl.where(tmp6, tmp7, tmp8)
tmp10 = 1.0
tmp11 = tl.where(tmp4, tmp10, tmp9)
tmp12 = tl.full([1], 4, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.full([1], 5, tl.int64)
tmp15 = tmp0 < tmp14
tmp16 = tl.where(tmp15, tmp10, tmp7)
tmp17 = -0.0
tmp18 = tl.where(tmp13, tmp17, tmp16)
tmp19 = tl.where(tmp2, tmp11, tmp18)
tmp20 = x0
tmp21 = tmp20 < tmp3
tmp22 = 2.0
tmp23 = tl.where(tmp21, tmp22, tmp22)
tmp24 = tmp19 / tmp23
tl.store(out_ptr0 + (x2), tmp24, xmask)
''', device_str='cuda')
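# This kernel unrolls the compile-time inverse affine matrix into a tl.where
# chain and divides it by [W/2, H/2] (both 2.0 here). For TranslateX(M=4) the
# inverse transform is a pure shift of (4 - 1) / 10 * 4 = 1.2 pixels, which is
# where the hard-coded -1.2000000476837158 constant comes from. (Interpretation
# inferred from the constants; this comment is not compiler output.)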
# kernel path: runs/run_shard_6/inductor_cache/zf/czfnj57ejqlqojplqt6piz7sosofcvzqdxhskwatgn32njr7acz7.py
# Topologically Sorted Source Nodes: [img], Original ATen: [aten.grid_sampler_2d]
# Source node to ATen node mapping:
# img => add_2, add_3, full_default_3, full_default_4, ge, ge_1, index, logical_and, logical_and_1, logical_and_2, lt_2, lt_3, mul_4, mul_5, mul_6, round_1, round_2, where_4
# Graph fragment:
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, 2.0), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 1.5), kwargs = {})
# %round_1 : [num_users=3] = call_function[target=torch.ops.aten.round.default](args = (%add_2,), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%round_1, 0), kwargs = {})
# %lt_2 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%round_1, 4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_9, 2.0), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 1.5), kwargs = {})
# %round_2 : [num_users=3] = call_function[target=torch.ops.aten.round.default](args = (%add_3,), kwargs = {})
# %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%round_2, 0), kwargs = {})
# %lt_3 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%round_2, 4), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_1, %lt_3), kwargs = {})
# %logical_and_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_2, %logical_and), kwargs = {})
# %logical_and_2 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %logical_and_1), kwargs = {})
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [%view_5, %view_6, %where_3, %where_2]), kwargs = {})
# %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_2, %full_default_4, %full_default_3), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %where_4), kwargs = {})
triton_poi_fused_grid_sampler_2d_2 = async_compile.triton('triton_poi_fused_grid_sampler_2d_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_grid_sampler_2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_grid_sampler_2d_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.5
tmp4 = tmp2 + tmp3
tmp5 = libdevice.nearbyint(tmp4)
tmp6 = 0.0
tmp7 = tmp5 >= tmp6
tmp8 = 4.0
tmp9 = tmp5 < tmp8
tmp11 = tmp10 * tmp1
tmp12 = tmp11 + tmp3
tmp13 = libdevice.nearbyint(tmp12)
tmp14 = tmp13 >= tmp6
tmp15 = tmp13 < tmp8
tmp16 = tmp14 & tmp15
tmp17 = tmp9 & tmp16
tmp18 = tmp7 & tmp17
tmp19 = tmp13.to(tl.int64)
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.where(tmp18, tmp19, tmp20)
tmp22 = tl.full([XBLOCK], 4, tl.int32)
tmp23 = tmp21 + tmp22
tmp24 = tmp21 < 0
tmp25 = tl.where(tmp24, tmp23, tmp21)
tl.device_assert(((0 <= tmp25) & (tmp25 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp25 < 4")
tmp27 = tmp5.to(tl.int64)
tmp28 = tl.where(tmp18, tmp27, tmp20)
tmp29 = tmp28 + tmp22
tmp30 = tmp28 < 0
tmp31 = tl.where(tmp30, tmp29, tmp28)
tl.device_assert(((0 <= tmp31) & (tmp31 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp31 < 4")
tmp33 = tl.load(in_ptr1 + (tmp31 + (4*tmp25) + (16*x3)), xmask, eviction_policy='evict_last')
tmp34 = tl.full([1], 1, tl.int64)
tmp35 = tl.where(tmp18, tmp34, tmp20)
tmp36 = tmp35.to(tl.float32)
tmp37 = tmp33 * tmp36
tl.store(out_ptr0 + (x4), tmp37, xmask)
''', device_str='cuda')
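# The sampler above is nearest-neighbor grid_sample with zeros padding:
# nearbyint() rounds the unnormalized coordinates, the >= 0 and < 4 compares
# build the in-bounds mask, out-of-range indices are replaced by 0 for the
# gather, and the final multiply by the 0/1 mask zero-fills samples that fell
# outside the 4x4 input.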
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((1, 4, 4, 3), (48, 12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [fill_], Original ATen: [aten.fill]
stream0 = get_raw_stream(0)
triton_poi_fused_fill_0.run(buf2, 48, grid=grid(48), stream=stream0)
buf3 = empty_strided_cuda((1, 3, 2), (6, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta], Original ATen: [aten.lift_fresh, aten.div]
triton_poi_fused_div_lift_fresh_1.run(buf3, 6, grid=grid(6), stream=stream0)
buf4 = empty_strided_cuda((1, 16, 2), (32, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta, output_grid], Original ATen: [aten.lift_fresh, aten.div, aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 3), (48, 3, 1), 0), buf3, out=buf4)
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [img], Original ATen: [aten.grid_sampler_2d]
triton_poi_fused_grid_sampler_2d_2.run(buf4, arg0_1, buf5, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf4
return (buf5, )
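# End-to-end, call() reproduces ttf.functional.affine for this fixed 4x4 case:
# fill the base grid (buf2), build the rescaled inverse theta (buf3), bmm them
# into the sampling grid (buf4), then nearest-neighbor grid-sample the input
# into buf5.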
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torchvision import transforms as ttf
class TranslateX(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
        try:
            max_size = img.size()[0]  # torch.Tensor: size() is a method
        except TypeError:
            max_size = img.size[0]  # PIL.Image: size is a tuple attribute
return ttf.functional.affine(img, 0, [(max_size - 1) / 10 * self.M,
0], 1, [0, 0])
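    # Usage sketch (values follow get_inputs below): with M=4 and a
    # 4x4x4x4 input, the shift is (4 - 1) / 10 * 4 = 1.2 pixels along x:
    #   out = TranslateX(M=4)(torch.rand(4, 4, 4, 4))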
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'M': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_fill_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x2 = xindex // 12
x1 = xindex // 3 % 4
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 1, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = x2
tmp6 = tmp5.to(tl.float32)
tmp7 = 2.0
tmp8 = tmp6 < tmp7
tmp9 = 1.0
tmp10 = tmp6 * tmp9
tmp11 = -1.5
tmp12 = tmp10 + tmp11
tmp13 = 3 + -1 * x2
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 * tmp9
tmp16 = 1.5
tmp17 = tmp16 - tmp15
tmp18 = tl.where(tmp8, tmp12, tmp17)
tmp19 = tl.full([1], 0, tl.int32)
tmp20 = tmp0 == tmp19
tmp21 = x1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp22 < tmp7
tmp24 = tmp22 * tmp9
tmp25 = tmp24 + tmp11
tmp26 = 3 + -1 * x1
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp27 * tmp9
tmp29 = tmp16 - tmp28
tmp30 = tl.where(tmp23, tmp25, tmp29)
tmp31 = float('nan')
tmp32 = tl.where(tmp20, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp18, tmp32)
tmp34 = tl.where(tmp2, tmp9, tmp33)
tl.store(out_ptr0 + x4, tmp34, xmask)
@triton.jit
def triton_poi_fused_div_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x1 + 3 * x0
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = 0.0
tmp8 = -1.2000000476837158
tmp9 = tl.where(tmp6, tmp7, tmp8)
tmp10 = 1.0
tmp11 = tl.where(tmp4, tmp10, tmp9)
tmp12 = tl.full([1], 4, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.full([1], 5, tl.int64)
tmp15 = tmp0 < tmp14
tmp16 = tl.where(tmp15, tmp10, tmp7)
tmp17 = -0.0
tmp18 = tl.where(tmp13, tmp17, tmp16)
tmp19 = tl.where(tmp2, tmp11, tmp18)
tmp20 = x0
tmp21 = tmp20 < tmp3
tmp22 = 2.0
tmp23 = tl.where(tmp21, tmp22, tmp22)
tmp24 = tmp19 / tmp23
tl.store(out_ptr0 + x2, tmp24, xmask)
@triton.jit
def triton_poi_fused_grid_sampler_2d_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.5
tmp4 = tmp2 + tmp3
tmp5 = libdevice.nearbyint(tmp4)
tmp6 = 0.0
tmp7 = tmp5 >= tmp6
tmp8 = 4.0
tmp9 = tmp5 < tmp8
tmp11 = tmp10 * tmp1
tmp12 = tmp11 + tmp3
tmp13 = libdevice.nearbyint(tmp12)
tmp14 = tmp13 >= tmp6
tmp15 = tmp13 < tmp8
tmp16 = tmp14 & tmp15
tmp17 = tmp9 & tmp16
tmp18 = tmp7 & tmp17
tmp19 = tmp13.to(tl.int64)
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.where(tmp18, tmp19, tmp20)
tmp22 = tl.full([XBLOCK], 4, tl.int32)
tmp23 = tmp21 + tmp22
tmp24 = tmp21 < 0
tmp25 = tl.where(tmp24, tmp23, tmp21)
tl.device_assert((0 <= tmp25) & (tmp25 < 4) | ~xmask,
'index out of bounds: 0 <= tmp25 < 4')
tmp27 = tmp5.to(tl.int64)
tmp28 = tl.where(tmp18, tmp27, tmp20)
tmp29 = tmp28 + tmp22
tmp30 = tmp28 < 0
tmp31 = tl.where(tmp30, tmp29, tmp28)
tl.device_assert((0 <= tmp31) & (tmp31 < 4) | ~xmask,
'index out of bounds: 0 <= tmp31 < 4')
tmp33 = tl.load(in_ptr1 + (tmp31 + 4 * tmp25 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp34 = tl.full([1], 1, tl.int64)
tmp35 = tl.where(tmp18, tmp34, tmp20)
tmp36 = tmp35.to(tl.float32)
tmp37 = tmp33 * tmp36
tl.store(out_ptr0 + x4, tmp37, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((1, 4, 4, 3), (48, 12, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_fill_0[grid(48)](buf2, 48, XBLOCK=64, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((1, 3, 2), (6, 2, 1), torch.float32)
triton_poi_fused_div_lift_fresh_1[grid(6)](buf3, 6, XBLOCK=8,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((1, 16, 2), (32, 2, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 3), (48, 3, 1),
0), buf3, out=buf4)
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_grid_sampler_2d_2[grid(256)](buf4, arg0_1, buf5,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf4
return buf5,
class TranslateXNew(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Hayoung93/UDA | TranslateX | false | 958 | [
"Apache-2.0"
] | 0 | a587b01c76141d64e7cead55b62e0f3ed75890bf | https://github.com/Hayoung93/UDA/tree/a587b01c76141d64e7cead55b62e0f3ed75890bf | import torch
import torch.nn as nn
from torchvision import transforms as ttf
class Model(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
        try:
            max_size = img.size()[0]  # torch.Tensor: size() is a method
        except TypeError:
            max_size = img.size[0]  # PIL.Image: size is a tuple attribute
return ttf.functional.affine(img, 0, [(max_size - 1) / 10 * self.M,
0], 1, [0, 0])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Rotate | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/64/c64wcolc5y2n37sxtnpnhr2dzw4gq7giy4xyi4ub5z3kplhqckpr.py
# Topologically Sorted Source Nodes: [fill_], Original ATen: [aten.fill]
# Source node to ATen node mapping:
# fill_ => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %select_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %full_default, 3, 2), kwargs = {})
triton_poi_fused_fill_0 = async_compile.triton('triton_poi_fused_fill_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x2 = (xindex // 12)
x1 = (xindex // 3) % 4
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 1, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = x2
tmp6 = tmp5.to(tl.float32)
tmp7 = 2.0
tmp8 = tmp6 < tmp7
tmp9 = 1.0
tmp10 = tmp6 * tmp9
tmp11 = -1.5
tmp12 = tmp10 + tmp11
tmp13 = 3 + ((-1)*x2)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 * tmp9
tmp16 = 1.5
tmp17 = tmp16 - tmp15
tmp18 = tl.where(tmp8, tmp12, tmp17)
tmp19 = tl.full([1], 0, tl.int32)
tmp20 = tmp0 == tmp19
tmp21 = x1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp22 < tmp7
tmp24 = tmp22 * tmp9
tmp25 = tmp24 + tmp11
tmp26 = 3 + ((-1)*x1)
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp27 * tmp9
tmp29 = tmp16 - tmp28
tmp30 = tl.where(tmp23, tmp25, tmp29)
tmp31 = float("nan")
tmp32 = tl.where(tmp20, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp18, tmp32)
tmp34 = tl.where(tmp2, tmp9, tmp33)
tl.store(out_ptr0 + (x4), tmp34, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/vl/cvlk4i6bpsuyhoe5owdgq5irolr3p5hp23dqwsg3bzba5cjk5ie5.py
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta], Original ATen: [aten.lift_fresh, aten.div]
# Source node to ATen node mapping:
# rescaled_theta => div
# tensor_1 => lift_fresh_copy_1
# Graph fragment:
# %lift_fresh_copy_1 : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%_tensor_constant1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute, %lift_fresh_copy_1), kwargs = {})
triton_poi_fused_div_lift_fresh_1 = async_compile.triton('triton_poi_fused_div_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_lift_fresh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_lift_fresh_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x1 + (3*x0)
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = -0.5934188961982727
tmp8 = 0.0
tmp9 = tl.where(tmp6, tmp7, tmp8)
tmp10 = -0.8048937916755676
tmp11 = tl.where(tmp4, tmp10, tmp9)
tmp12 = tl.full([1], 4, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.full([1], 5, tl.int64)
tmp15 = tmp0 < tmp14
tmp16 = tl.where(tmp15, tmp10, tmp8)
tmp17 = 0.5934188961982727
tmp18 = tl.where(tmp13, tmp17, tmp16)
tmp19 = tl.where(tmp2, tmp11, tmp18)
tmp20 = x0
tmp21 = tmp20 < tmp3
tmp22 = 2.0
tmp23 = tl.where(tmp21, tmp22, tmp22)
tmp24 = tmp19 / tmp23
tl.store(out_ptr0 + (x2), tmp24, xmask)
''', device_str='cuda')
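# The constants here are the rotation matrix for the compile-time angle
# 359 / 10 * 4 = 143.6 degrees: -0.8048937916755676 is cos(143.6 deg) and
# +/-0.5934188961982727 is sin(143.6 deg), unrolled into the tl.where chain
# and again divided by [W/2, H/2] = [2, 2]. (Interpretation inferred from the
# constants; this comment is not compiler output.)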
# kernel path: runs/run_shard_6/inductor_cache/zf/czfnj57ejqlqojplqt6piz7sosofcvzqdxhskwatgn32njr7acz7.py
# Topologically Sorted Source Nodes: [img], Original ATen: [aten.grid_sampler_2d]
# Source node to ATen node mapping:
# img => add_2, add_3, full_default_3, full_default_4, ge, ge_1, index, logical_and, logical_and_1, logical_and_2, lt_2, lt_3, mul_4, mul_5, mul_6, round_1, round_2, where_4
# Graph fragment:
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, 2.0), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 1.5), kwargs = {})
# %round_1 : [num_users=3] = call_function[target=torch.ops.aten.round.default](args = (%add_2,), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%round_1, 0), kwargs = {})
# %lt_2 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%round_1, 4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_9, 2.0), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 1.5), kwargs = {})
# %round_2 : [num_users=3] = call_function[target=torch.ops.aten.round.default](args = (%add_3,), kwargs = {})
# %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%round_2, 0), kwargs = {})
# %lt_3 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%round_2, 4), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_1, %lt_3), kwargs = {})
# %logical_and_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_2, %logical_and), kwargs = {})
# %logical_and_2 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %logical_and_1), kwargs = {})
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [%view_5, %view_6, %where_3, %where_2]), kwargs = {})
# %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_2, %full_default_4, %full_default_3), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %where_4), kwargs = {})
triton_poi_fused_grid_sampler_2d_2 = async_compile.triton('triton_poi_fused_grid_sampler_2d_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_grid_sampler_2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_grid_sampler_2d_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.5
tmp4 = tmp2 + tmp3
tmp5 = libdevice.nearbyint(tmp4)
tmp6 = 0.0
tmp7 = tmp5 >= tmp6
tmp8 = 4.0
tmp9 = tmp5 < tmp8
tmp11 = tmp10 * tmp1
tmp12 = tmp11 + tmp3
tmp13 = libdevice.nearbyint(tmp12)
tmp14 = tmp13 >= tmp6
tmp15 = tmp13 < tmp8
tmp16 = tmp14 & tmp15
tmp17 = tmp9 & tmp16
tmp18 = tmp7 & tmp17
tmp19 = tmp13.to(tl.int64)
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.where(tmp18, tmp19, tmp20)
tmp22 = tl.full([XBLOCK], 4, tl.int32)
tmp23 = tmp21 + tmp22
tmp24 = tmp21 < 0
tmp25 = tl.where(tmp24, tmp23, tmp21)
tl.device_assert(((0 <= tmp25) & (tmp25 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp25 < 4")
tmp27 = tmp5.to(tl.int64)
tmp28 = tl.where(tmp18, tmp27, tmp20)
tmp29 = tmp28 + tmp22
tmp30 = tmp28 < 0
tmp31 = tl.where(tmp30, tmp29, tmp28)
tl.device_assert(((0 <= tmp31) & (tmp31 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp31 < 4")
tmp33 = tl.load(in_ptr1 + (tmp31 + (4*tmp25) + (16*x3)), xmask, eviction_policy='evict_last')
tmp34 = tl.full([1], 1, tl.int64)
tmp35 = tl.where(tmp18, tmp34, tmp20)
tmp36 = tmp35.to(tl.float32)
tmp37 = tmp33 * tmp36
tl.store(out_ptr0 + (x4), tmp37, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((1, 4, 4, 3), (48, 12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [fill_], Original ATen: [aten.fill]
stream0 = get_raw_stream(0)
triton_poi_fused_fill_0.run(buf2, 48, grid=grid(48), stream=stream0)
buf3 = empty_strided_cuda((1, 3, 2), (6, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta], Original ATen: [aten.lift_fresh, aten.div]
triton_poi_fused_div_lift_fresh_1.run(buf3, 6, grid=grid(6), stream=stream0)
buf4 = empty_strided_cuda((1, 16, 2), (32, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta, output_grid], Original ATen: [aten.lift_fresh, aten.div, aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 3), (48, 3, 1), 0), buf3, out=buf4)
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [img], Original ATen: [aten.grid_sampler_2d]
triton_poi_fused_grid_sampler_2d_2.run(buf4, arg0_1, buf5, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf4
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torchvision import transforms as ttf
class Rotate(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M
def forward(self, img):
return ttf.functional.rotate(img, self.angle)
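    # For M=4 this is a fixed 359 / 10 * 4 = 143.6 degree rotation, i.e. the
    # same as calling ttf.functional.rotate(img, 143.6) directly.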
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'M': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_fill_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x2 = xindex // 12
x1 = xindex // 3 % 4
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 1, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = x2
tmp6 = tmp5.to(tl.float32)
tmp7 = 2.0
tmp8 = tmp6 < tmp7
tmp9 = 1.0
tmp10 = tmp6 * tmp9
tmp11 = -1.5
tmp12 = tmp10 + tmp11
tmp13 = 3 + -1 * x2
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 * tmp9
tmp16 = 1.5
tmp17 = tmp16 - tmp15
tmp18 = tl.where(tmp8, tmp12, tmp17)
tmp19 = tl.full([1], 0, tl.int32)
tmp20 = tmp0 == tmp19
tmp21 = x1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp22 < tmp7
tmp24 = tmp22 * tmp9
tmp25 = tmp24 + tmp11
tmp26 = 3 + -1 * x1
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp27 * tmp9
tmp29 = tmp16 - tmp28
tmp30 = tl.where(tmp23, tmp25, tmp29)
tmp31 = float('nan')
tmp32 = tl.where(tmp20, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp18, tmp32)
tmp34 = tl.where(tmp2, tmp9, tmp33)
tl.store(out_ptr0 + x4, tmp34, xmask)
@triton.jit
def triton_poi_fused_div_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x1 + 3 * x0
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = -0.5934188961982727
tmp8 = 0.0
tmp9 = tl.where(tmp6, tmp7, tmp8)
tmp10 = -0.8048937916755676
tmp11 = tl.where(tmp4, tmp10, tmp9)
tmp12 = tl.full([1], 4, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.full([1], 5, tl.int64)
tmp15 = tmp0 < tmp14
tmp16 = tl.where(tmp15, tmp10, tmp8)
tmp17 = 0.5934188961982727
tmp18 = tl.where(tmp13, tmp17, tmp16)
tmp19 = tl.where(tmp2, tmp11, tmp18)
tmp20 = x0
tmp21 = tmp20 < tmp3
tmp22 = 2.0
tmp23 = tl.where(tmp21, tmp22, tmp22)
tmp24 = tmp19 / tmp23
tl.store(out_ptr0 + x2, tmp24, xmask)
@triton.jit
def triton_poi_fused_grid_sampler_2d_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.5
tmp4 = tmp2 + tmp3
tmp5 = libdevice.nearbyint(tmp4)
tmp6 = 0.0
tmp7 = tmp5 >= tmp6
tmp8 = 4.0
tmp9 = tmp5 < tmp8
tmp11 = tmp10 * tmp1
tmp12 = tmp11 + tmp3
tmp13 = libdevice.nearbyint(tmp12)
tmp14 = tmp13 >= tmp6
tmp15 = tmp13 < tmp8
tmp16 = tmp14 & tmp15
tmp17 = tmp9 & tmp16
tmp18 = tmp7 & tmp17
tmp19 = tmp13.to(tl.int64)
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.where(tmp18, tmp19, tmp20)
tmp22 = tl.full([XBLOCK], 4, tl.int32)
tmp23 = tmp21 + tmp22
tmp24 = tmp21 < 0
tmp25 = tl.where(tmp24, tmp23, tmp21)
tl.device_assert((0 <= tmp25) & (tmp25 < 4) | ~xmask,
'index out of bounds: 0 <= tmp25 < 4')
tmp27 = tmp5.to(tl.int64)
tmp28 = tl.where(tmp18, tmp27, tmp20)
tmp29 = tmp28 + tmp22
tmp30 = tmp28 < 0
tmp31 = tl.where(tmp30, tmp29, tmp28)
tl.device_assert((0 <= tmp31) & (tmp31 < 4) | ~xmask,
'index out of bounds: 0 <= tmp31 < 4')
tmp33 = tl.load(in_ptr1 + (tmp31 + 4 * tmp25 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp34 = tl.full([1], 1, tl.int64)
tmp35 = tl.where(tmp18, tmp34, tmp20)
tmp36 = tmp35.to(tl.float32)
tmp37 = tmp33 * tmp36
tl.store(out_ptr0 + x4, tmp37, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((1, 4, 4, 3), (48, 12, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_fill_0[grid(48)](buf2, 48, XBLOCK=64, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((1, 3, 2), (6, 2, 1), torch.float32)
triton_poi_fused_div_lift_fresh_1[grid(6)](buf3, 6, XBLOCK=8,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((1, 16, 2), (32, 2, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 3), (48, 3, 1),
0), buf3, out=buf4)
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_grid_sampler_2d_2[grid(256)](buf4, arg0_1, buf5,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf4
return buf5,
class RotateNew(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Hayoung93/UDA | Rotate | false | 959 | [
"Apache-2.0"
] | 0 | a587b01c76141d64e7cead55b62e0f3ed75890bf | https://github.com/Hayoung93/UDA/tree/a587b01c76141d64e7cead55b62e0f3ed75890bf | import torch
import torch.nn as nn
from torchvision import transforms as ttf
class Model(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M
def forward(self, img):
return ttf.functional.rotate(img, self.angle)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
hswish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/jj/cjjcpa4jfom3kmx4ufnxtda3bmq466cpemkegyhzep2ymmlsg35l.py
# Topologically Sorted Source Nodes: [add, relu6, mul, out], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# mul => mul
# out => div
# relu6 => clamp_max, clamp_min
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %clamp_max), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 6), kwargs = {})
triton_poi_fused_add_div_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
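# The kernel computes hardswish pointwise: clamp(x + 3, 0, 6) via maximum/
# minimum, multiply by x, then multiply by the precomputed reciprocal of 6
# (0.16666666666666666) instead of dividing.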
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, relu6, mul, out], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class hswish(nn.Module):
def forward(self, x):
out = x * F.relu6(x + 3, inplace=True) / 6
return out
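# Note: this matches F.hardswish / nn.Hardswish (available since torch 1.6),
# defined as x * relu6(x + 3) / 6, so up to float rounding
# torch.allclose(hswish()(x), F.hardswish(x)) should hold.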
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class hswishNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Qidian213/NAIC2019 | hswish | false | 960 | [
"MIT"
] | 0 | 23e05a8a096168ccfa4d1743467fdf78ffcaabba | https://github.com/Qidian213/NAIC2019/tree/23e05a8a096168ccfa4d1743467fdf78ffcaabba | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def forward(self, x):
out = x * F.relu6(x + 3, inplace=True) / 6
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
TranslateY | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/64/c64wcolc5y2n37sxtnpnhr2dzw4gq7giy4xyi4ub5z3kplhqckpr.py
# Topologically Sorted Source Nodes: [fill_], Original ATen: [aten.fill]
# Source node to ATen node mapping:
# fill_ => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %select_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %full_default, 3, 2), kwargs = {})
triton_poi_fused_fill_0 = async_compile.triton('triton_poi_fused_fill_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x2 = (xindex // 12)
x1 = (xindex // 3) % 4
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 1, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = x2
tmp6 = tmp5.to(tl.float32)
tmp7 = 2.0
tmp8 = tmp6 < tmp7
tmp9 = 1.0
tmp10 = tmp6 * tmp9
tmp11 = -1.5
tmp12 = tmp10 + tmp11
tmp13 = 3 + ((-1)*x2)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 * tmp9
tmp16 = 1.5
tmp17 = tmp16 - tmp15
tmp18 = tl.where(tmp8, tmp12, tmp17)
tmp19 = tl.full([1], 0, tl.int32)
tmp20 = tmp0 == tmp19
tmp21 = x1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp22 < tmp7
tmp24 = tmp22 * tmp9
tmp25 = tmp24 + tmp11
tmp26 = 3 + ((-1)*x1)
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp27 * tmp9
tmp29 = tmp16 - tmp28
tmp30 = tl.where(tmp23, tmp25, tmp29)
tmp31 = float("nan")
tmp32 = tl.where(tmp20, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp18, tmp32)
tmp34 = tl.where(tmp2, tmp9, tmp33)
tl.store(out_ptr0 + (x4), tmp34, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/vi/cviolvz62f25a3spnojjqdhykrfiogx53mbgi3scul2c6ivq7wxd.py
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta], Original ATen: [aten.lift_fresh, aten.div]
# Source node to ATen node mapping:
# rescaled_theta => div
# tensor_1 => lift_fresh_copy_1
# Graph fragment:
# %lift_fresh_copy_1 : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%_tensor_constant1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute, %lift_fresh_copy_1), kwargs = {})
triton_poi_fused_div_lift_fresh_1 = async_compile.triton('triton_poi_fused_div_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_lift_fresh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_lift_fresh_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x1 + (3*x0)
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp7)
tmp9 = 1.0
tmp10 = tl.where(tmp4, tmp9, tmp8)
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.full([1], 5, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = -1.2000000476837158
tmp16 = tl.where(tmp14, tmp9, tmp15)
tmp17 = -0.0
tmp18 = tl.where(tmp12, tmp17, tmp16)
tmp19 = tl.where(tmp2, tmp10, tmp18)
tmp20 = x0
tmp21 = tmp20 < tmp3
tmp22 = 2.0
tmp23 = tl.where(tmp21, tmp22, tmp22)
tmp24 = tmp19 / tmp23
tl.store(out_ptr0 + (x2), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/zf/czfnj57ejqlqojplqt6piz7sosofcvzqdxhskwatgn32njr7acz7.py
# Topologically Sorted Source Nodes: [img], Original ATen: [aten.grid_sampler_2d]
# Source node to ATen node mapping:
# img => add_2, add_3, full_default_3, full_default_4, ge, ge_1, index, logical_and, logical_and_1, logical_and_2, lt_2, lt_3, mul_4, mul_5, mul_6, round_1, round_2, where_4
# Graph fragment:
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, 2.0), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 1.5), kwargs = {})
# %round_1 : [num_users=3] = call_function[target=torch.ops.aten.round.default](args = (%add_2,), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%round_1, 0), kwargs = {})
# %lt_2 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%round_1, 4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_9, 2.0), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 1.5), kwargs = {})
# %round_2 : [num_users=3] = call_function[target=torch.ops.aten.round.default](args = (%add_3,), kwargs = {})
# %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%round_2, 0), kwargs = {})
# %lt_3 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%round_2, 4), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_1, %lt_3), kwargs = {})
# %logical_and_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_2, %logical_and), kwargs = {})
# %logical_and_2 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %logical_and_1), kwargs = {})
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [%view_5, %view_6, %where_3, %where_2]), kwargs = {})
# %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_2, %full_default_4, %full_default_3), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %where_4), kwargs = {})
triton_poi_fused_grid_sampler_2d_2 = async_compile.triton('triton_poi_fused_grid_sampler_2d_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_grid_sampler_2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_grid_sampler_2d_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.5
tmp4 = tmp2 + tmp3
tmp5 = libdevice.nearbyint(tmp4)
tmp6 = 0.0
tmp7 = tmp5 >= tmp6
tmp8 = 4.0
tmp9 = tmp5 < tmp8
tmp11 = tmp10 * tmp1
tmp12 = tmp11 + tmp3
tmp13 = libdevice.nearbyint(tmp12)
tmp14 = tmp13 >= tmp6
tmp15 = tmp13 < tmp8
tmp16 = tmp14 & tmp15
tmp17 = tmp9 & tmp16
tmp18 = tmp7 & tmp17
tmp19 = tmp13.to(tl.int64)
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.where(tmp18, tmp19, tmp20)
tmp22 = tl.full([XBLOCK], 4, tl.int32)
tmp23 = tmp21 + tmp22
tmp24 = tmp21 < 0
tmp25 = tl.where(tmp24, tmp23, tmp21)
tl.device_assert(((0 <= tmp25) & (tmp25 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp25 < 4")
tmp27 = tmp5.to(tl.int64)
tmp28 = tl.where(tmp18, tmp27, tmp20)
tmp29 = tmp28 + tmp22
tmp30 = tmp28 < 0
tmp31 = tl.where(tmp30, tmp29, tmp28)
tl.device_assert(((0 <= tmp31) & (tmp31 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp31 < 4")
tmp33 = tl.load(in_ptr1 + (tmp31 + (4*tmp25) + (16*x3)), xmask, eviction_policy='evict_last')
tmp34 = tl.full([1], 1, tl.int64)
tmp35 = tl.where(tmp18, tmp34, tmp20)
tmp36 = tmp35.to(tl.float32)
tmp37 = tmp33 * tmp36
tl.store(out_ptr0 + (x4), tmp37, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((1, 4, 4, 3), (48, 12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [fill_], Original ATen: [aten.fill]
stream0 = get_raw_stream(0)
triton_poi_fused_fill_0.run(buf2, 48, grid=grid(48), stream=stream0)
buf3 = empty_strided_cuda((1, 3, 2), (6, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta], Original ATen: [aten.lift_fresh, aten.div]
triton_poi_fused_div_lift_fresh_1.run(buf3, 6, grid=grid(6), stream=stream0)
buf4 = empty_strided_cuda((1, 16, 2), (32, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta, output_grid], Original ATen: [aten.lift_fresh, aten.div, aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 3), (48, 3, 1), 0), buf3, out=buf4)
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [img], Original ATen: [aten.grid_sampler_2d]
triton_poi_fused_grid_sampler_2d_2.run(buf4, arg0_1, buf5, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf4
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torchvision import transforms as ttf
class TranslateY(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
        try:
            max_size = img.size()[1]
        except TypeError:
            # PIL images expose `size` as an attribute, not a method
            max_size = img.size[1]
        return ttf.functional.affine(img, 0, [0, (max_size - 1) / 10 * self.M], 1, [0, 0])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'M': 4}]
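# --- Usage sketch (editor's illustration, not part of the original repo) ---
# With M = 4 and a 4x4 spatial input, the module shifts the image along Y by
# (max_size - 1) / 10 * M = (4 - 1) / 10 * 4 = 1.2 pixels; a recent
# torchvision is assumed, since the tensor path accepts float translations.
def _translate_y_demo():
    module = TranslateY(M=4)
    img, = get_inputs()
    out = module(img)
    assert out.shape == img.shape  # the affine warp preserves spatial shape
    return out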
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
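# Kernel 0: builds the (1, 4, 4, 3) base grid of homogeneous (x, y, 1)
# pixel-centre sampling coordinates (linspace -1.5..1.5 per axis) that
# feeds the affine warp.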
@triton.jit
def triton_poi_fused_fill_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x2 = xindex // 12
x1 = xindex // 3 % 4
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 1, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = x2
tmp6 = tmp5.to(tl.float32)
tmp7 = 2.0
tmp8 = tmp6 < tmp7
tmp9 = 1.0
tmp10 = tmp6 * tmp9
tmp11 = -1.5
tmp12 = tmp10 + tmp11
tmp13 = 3 + -1 * x2
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 * tmp9
tmp16 = 1.5
tmp17 = tmp16 - tmp15
tmp18 = tl.where(tmp8, tmp12, tmp17)
tmp19 = tl.full([1], 0, tl.int32)
tmp20 = tmp0 == tmp19
tmp21 = x1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp22 < tmp7
tmp24 = tmp22 * tmp9
tmp25 = tmp24 + tmp11
tmp26 = 3 + -1 * x1
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp27 * tmp9
tmp29 = tmp16 - tmp28
tmp30 = tl.where(tmp23, tmp25, tmp29)
tmp31 = float('nan')
tmp32 = tl.where(tmp20, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp18, tmp32)
tmp34 = tl.where(tmp2, tmp9, tmp33)
tl.store(out_ptr0 + x4, tmp34, xmask)
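# Kernel 1: materializes the constant (1, 3, 2) inverse affine matrix and
# rescales it into normalized grid units (division by the half-extent 2.0);
# the -1.2 entry is the inverse of the (4 - 1) / 10 * 4 = 1.2 px Y translation.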
@triton.jit
def triton_poi_fused_div_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x1 + 3 * x0
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp7)
tmp9 = 1.0
tmp10 = tl.where(tmp4, tmp9, tmp8)
tmp11 = tl.full([1], 4, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.full([1], 5, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = -1.2000000476837158
tmp16 = tl.where(tmp14, tmp9, tmp15)
tmp17 = -0.0
tmp18 = tl.where(tmp12, tmp17, tmp16)
tmp19 = tl.where(tmp2, tmp10, tmp18)
tmp20 = x0
tmp21 = tmp20 < tmp3
tmp22 = 2.0
tmp23 = tl.where(tmp21, tmp22, tmp22)
tmp24 = tmp19 / tmp23
tl.store(out_ptr0 + x2, tmp24, xmask)
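# Kernel 2: nearest-neighbour grid_sampler_2d with zeros padding -- rounds
# each grid location to an integer pixel, masks out-of-bounds samples, and
# gathers from the input image.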
@triton.jit
def triton_poi_fused_grid_sampler_2d_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.5
tmp4 = tmp2 + tmp3
tmp5 = libdevice.nearbyint(tmp4)
tmp6 = 0.0
tmp7 = tmp5 >= tmp6
tmp8 = 4.0
tmp9 = tmp5 < tmp8
tmp11 = tmp10 * tmp1
tmp12 = tmp11 + tmp3
tmp13 = libdevice.nearbyint(tmp12)
tmp14 = tmp13 >= tmp6
tmp15 = tmp13 < tmp8
tmp16 = tmp14 & tmp15
tmp17 = tmp9 & tmp16
tmp18 = tmp7 & tmp17
tmp19 = tmp13.to(tl.int64)
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.where(tmp18, tmp19, tmp20)
tmp22 = tl.full([XBLOCK], 4, tl.int32)
tmp23 = tmp21 + tmp22
tmp24 = tmp21 < 0
tmp25 = tl.where(tmp24, tmp23, tmp21)
tl.device_assert((0 <= tmp25) & (tmp25 < 4) | ~xmask,
'index out of bounds: 0 <= tmp25 < 4')
tmp27 = tmp5.to(tl.int64)
tmp28 = tl.where(tmp18, tmp27, tmp20)
tmp29 = tmp28 + tmp22
tmp30 = tmp28 < 0
tmp31 = tl.where(tmp30, tmp29, tmp28)
tl.device_assert((0 <= tmp31) & (tmp31 < 4) | ~xmask,
'index out of bounds: 0 <= tmp31 < 4')
tmp33 = tl.load(in_ptr1 + (tmp31 + 4 * tmp25 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp34 = tl.full([1], 1, tl.int64)
tmp35 = tl.where(tmp18, tmp34, tmp20)
tmp36 = tmp35.to(tl.float32)
tmp37 = tmp33 * tmp36
tl.store(out_ptr0 + x4, tmp37, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((1, 4, 4, 3), (48, 12, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_fill_0[grid(48)](buf2, 48, XBLOCK=64, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((1, 3, 2), (6, 2, 1), torch.float32)
triton_poi_fused_div_lift_fresh_1[grid(6)](buf3, 6, XBLOCK=8,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((1, 16, 2), (32, 2, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 3), (48, 3, 1),
0), buf3, out=buf4)
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_grid_sampler_2d_2[grid(256)](buf4, arg0_1, buf5,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf4
return buf5,
class TranslateYNew(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Hayoung93/UDA | TranslateY | false | 961 | [
"Apache-2.0"
] | 0 | a587b01c76141d64e7cead55b62e0f3ed75890bf | https://github.com/Hayoung93/UDA/tree/a587b01c76141d64e7cead55b62e0f3ed75890bf | import torch
import torch.nn as nn
from torchvision import transforms as ttf
class Model(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
        try:
            max_size = img.size()[1]
        except TypeError:
            # PIL images expose `size` as an attribute, not a method
            max_size = img.size[1]
        return ttf.functional.affine(img, 0, [0, (max_size - 1) / 10 * self.M], 1, [0, 0])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
ListMLELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/2k/c2kcraw2l6i5usoqj5lv2qye7u4cg26yoptt475f7j54itqpogy5.py
# Topologically Sorted Source Nodes: [y_true_shuffled, sort], Original ATen: [aten.index, aten.sort]
# Source node to ATen node mapping:
# sort => sort
# y_true_shuffled => index_1
# Graph fragment:
# %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [None, %randperm]), kwargs = {})
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%index_1, -1, True), kwargs = {})
triton_per_fused_index_sort_0 = async_compile.triton('triton_per_fused_index_sort_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*i16', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_sort_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_sort_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x1 = (xindex // 4) % 4
r3 = rindex
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (r3 + (4*x0) + (16*tmp4) + (64*x2)), xmask, other=0.0)
tmp7 = r3
tmp8 = tmp7.to(tl.int16)
tmp9 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11, tmp12, = triton_helpers.sort_with_index(tmp9, tmp10, None, 1, stable=False, descending=True)
tl.store(out_ptr0 + (r3 + (4*x4)), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/km/ckmab3w6k6isdk4mh5vg7dy675iyetyejlqa5yss7klua3lx5sjc.py
# Topologically Sorted Source Nodes: [y_true_shuffled, sort, y_pred_shuffled, preds_sorted_by_true, max_1, preds_sorted_by_true_minus_max, exp, flip, cumsum], Original ATen: [aten.index, aten.sort, aten.gather, aten.max, aten.sub, aten.exp, aten.flip, aten.cumsum]
# Source node to ATen node mapping:
# cumsum => cumsum
# exp => exp
# flip => rev
# max_1 => max_1
# preds_sorted_by_true => gather
# preds_sorted_by_true_minus_max => sub
# sort => sort
# y_pred_shuffled => index
# y_true_shuffled => index_1
# Graph fragment:
# %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [None, %randperm]), kwargs = {})
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%index_1, -1, True), kwargs = {})
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %randperm]), kwargs = {})
# %gather : [num_users=2] = call_function[target=torch.ops.aten.gather.default](args = (%index, 1, %getitem_1), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%gather, 1, True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%gather, %getitem_2), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %rev : [num_users=1] = call_function[target=torch.ops.prims.rev.default](args = (%exp, [1]), kwargs = {})
# %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%rev, 1), kwargs = {})
triton_per_fused_cumsum_exp_flip_gather_index_max_sort_sub_1 = async_compile.triton('triton_per_fused_cumsum_exp_flip_gather_index_max_sort_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*i16', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_exp_flip_gather_index_max_sort_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cumsum_exp_flip_gather_index_max_sort_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x3 = xindex
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp52 = tl.load(in_ptr0 + (48 + x0 + ((-16)*r2) + (64*x1)), xmask, other=0.0)
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([XBLOCK, 1], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert(((0 <= tmp5) & (tmp5 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp5 < 4")
tmp7 = tl.load(in_ptr1 + (tmp5), xmask, eviction_policy='evict_last')
tmp8 = tmp7 + tmp2
tmp9 = tmp7 < 0
tmp10 = tl.where(tmp9, tmp8, tmp7)
tl.device_assert(((0 <= tmp10) & (tmp10 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp10 < 4")
tmp12 = tl.load(in_ptr2 + (x0 + (16*tmp10) + (64*x1)), xmask)
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp14 + tmp2
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tl.device_assert(((0 <= tmp17) & (tmp17 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp17 < 4")
tmp19 = tl.load(in_ptr1 + (tmp17), xmask, eviction_policy='evict_last')
tmp20 = tmp19 + tmp2
tmp21 = tmp19 < 0
tmp22 = tl.where(tmp21, tmp20, tmp19)
tl.device_assert(((0 <= tmp22) & (tmp22 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp22 < 4")
tmp24 = tl.load(in_ptr2 + (x0 + (16*tmp22) + (64*x1)), xmask)
tmp25 = triton_helpers.maximum(tmp12, tmp24)
tmp27 = tmp26.to(tl.int64)
tmp28 = tmp27 + tmp2
tmp29 = tmp27 < 0
tmp30 = tl.where(tmp29, tmp28, tmp27)
tl.device_assert(((0 <= tmp30) & (tmp30 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp30 < 4")
tmp32 = tl.load(in_ptr1 + (tmp30), xmask, eviction_policy='evict_last')
tmp33 = tmp32 + tmp2
tmp34 = tmp32 < 0
tmp35 = tl.where(tmp34, tmp33, tmp32)
tl.device_assert(((0 <= tmp35) & (tmp35 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp35 < 4")
tmp37 = tl.load(in_ptr2 + (x0 + (16*tmp35) + (64*x1)), xmask)
tmp38 = triton_helpers.maximum(tmp25, tmp37)
tmp40 = tmp39.to(tl.int64)
tmp41 = tmp40 + tmp2
tmp42 = tmp40 < 0
tmp43 = tl.where(tmp42, tmp41, tmp40)
tl.device_assert(((0 <= tmp43) & (tmp43 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp43 < 4")
tmp45 = tl.load(in_ptr1 + (tmp43), xmask, eviction_policy='evict_last')
tmp46 = tmp45 + tmp2
tmp47 = tmp45 < 0
tmp48 = tl.where(tmp47, tmp46, tmp45)
tl.device_assert(((0 <= tmp48) & (tmp48 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp48 < 4")
tmp50 = tl.load(in_ptr2 + (x0 + (16*tmp48) + (64*x1)), xmask)
tmp51 = triton_helpers.maximum(tmp38, tmp50)
tmp53 = tmp52.to(tl.int64)
tmp54 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp55 = tmp53 + tmp54
tmp56 = tmp53 < 0
tmp57 = tl.where(tmp56, tmp55, tmp53)
tl.device_assert(((0 <= tmp57) & (tmp57 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp57 < 4")
tmp59 = tl.load(in_ptr1 + (tmp57), xmask, eviction_policy='evict_last')
tmp60 = tmp59 + tmp54
tmp61 = tmp59 < 0
tmp62 = tl.where(tmp61, tmp60, tmp59)
tl.device_assert(((0 <= tmp62) & (tmp62 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp62 < 4")
tmp64 = tl.load(in_ptr2 + (x0 + (16*tmp62) + (64*x1)), xmask)
tmp65 = tmp64 - tmp51
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp66.to(tl.float32)
tmp68 = tl.broadcast_to(tmp67, [XBLOCK, RBLOCK])
tmp69, = tl.associative_scan((tmp68,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (x3), tmp51, xmask)
tl.store(out_ptr1 + (x0 + (16*r2) + (64*x1)), tmp69, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/pt/cptdzn34dy5g2gawu6q2nc7xqndn4wjrzh4mxmffekrctqd4zngu.py
# Topologically Sorted Source Nodes: [y_true_shuffled, sort, y_pred_shuffled, preds_sorted_by_true, preds_sorted_by_true_minus_max, cumsums, add, log, observation_loss, sum_1, mean], Original ATen: [aten.index, aten.sort, aten.gather, aten.sub, aten.flip, aten.add, aten.log, aten.sum, aten.mean]
# Source node to ATen node mapping:
# add => add
# cumsums => rev_1
# log => log
# mean => mean
# observation_loss => sub_1
# preds_sorted_by_true => gather
# preds_sorted_by_true_minus_max => sub
# sort => sort
# sum_1 => sum_1
# y_pred_shuffled => index
# y_true_shuffled => index_1
# Graph fragment:
# %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg1_1, [None, %randperm]), kwargs = {})
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%index_1, -1, True), kwargs = {})
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %randperm]), kwargs = {})
# %gather : [num_users=2] = call_function[target=torch.ops.aten.gather.default](args = (%index, 1, %getitem_1), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%gather, %getitem_2), kwargs = {})
# %rev_1 : [num_users=1] = call_function[target=torch.ops.prims.rev.default](args = (%cumsum, [1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%rev_1, 1e-05), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log, %sub), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_1, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
triton_per_fused_add_flip_gather_index_log_mean_sort_sub_sum_2 = async_compile.triton('triton_per_fused_add_flip_gather_index_log_mean_sort_sub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i16', 4: '*i64', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=(6,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_flip_gather_index_log_mean_sort_sub_sum_2', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_flip_gather_index_log_mean_sort_sub_sum_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp4 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp17 = tl.load(in_out_ptr0 + (r2), None)
tmp20 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp23 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp38 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp41 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp56 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp59 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp1 = 1e-05
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp5 = tmp4.to(tl.int64)
tmp6 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tl.device_assert((0 <= tmp9) & (tmp9 < 4), "index out of bounds: 0 <= tmp9 < 4")
tmp11 = tl.load(in_ptr2 + (tmp9), None, eviction_policy='evict_last')
tmp12 = tmp11 + tmp6
tmp13 = tmp11 < 0
tmp14 = tl.where(tmp13, tmp12, tmp11)
tl.device_assert((0 <= tmp14) & (tmp14 < 4), "index out of bounds: 0 <= tmp14 < 4")
tmp16 = tl.load(in_ptr3 + (r0 + (16*tmp14) + (64*r1)), None)
tmp18 = tmp16 - tmp17
tmp19 = tmp3 - tmp18
tmp21 = tmp20 + tmp1
tmp22 = tl_math.log(tmp21)
tmp24 = tmp23.to(tl.int64)
tmp25 = tmp24 + tmp6
tmp26 = tmp24 < 0
tmp27 = tl.where(tmp26, tmp25, tmp24)
tl.device_assert((0 <= tmp27) & (tmp27 < 4), "index out of bounds: 0 <= tmp27 < 4")
tmp29 = tl.load(in_ptr2 + (tmp27), None, eviction_policy='evict_last')
tmp30 = tmp29 + tmp6
tmp31 = tmp29 < 0
tmp32 = tl.where(tmp31, tmp30, tmp29)
tl.device_assert((0 <= tmp32) & (tmp32 < 4), "index out of bounds: 0 <= tmp32 < 4")
tmp34 = tl.load(in_ptr3 + (r0 + (16*tmp32) + (64*r1)), None)
tmp35 = tmp34 - tmp17
tmp36 = tmp22 - tmp35
tmp37 = tmp19 + tmp36
tmp39 = tmp38 + tmp1
tmp40 = tl_math.log(tmp39)
tmp42 = tmp41.to(tl.int64)
tmp43 = tmp42 + tmp6
tmp44 = tmp42 < 0
tmp45 = tl.where(tmp44, tmp43, tmp42)
tl.device_assert((0 <= tmp45) & (tmp45 < 4), "index out of bounds: 0 <= tmp45 < 4")
tmp47 = tl.load(in_ptr2 + (tmp45), None, eviction_policy='evict_last')
tmp48 = tmp47 + tmp6
tmp49 = tmp47 < 0
tmp50 = tl.where(tmp49, tmp48, tmp47)
tl.device_assert((0 <= tmp50) & (tmp50 < 4), "index out of bounds: 0 <= tmp50 < 4")
tmp52 = tl.load(in_ptr3 + (r0 + (16*tmp50) + (64*r1)), None)
tmp53 = tmp52 - tmp17
tmp54 = tmp40 - tmp53
tmp55 = tmp37 + tmp54
tmp57 = tmp56 + tmp1
tmp58 = tl_math.log(tmp57)
tmp60 = tmp59.to(tl.int64)
tmp61 = tmp60 + tmp6
tmp62 = tmp60 < 0
tmp63 = tl.where(tmp62, tmp61, tmp60)
tl.device_assert((0 <= tmp63) & (tmp63 < 4), "index out of bounds: 0 <= tmp63 < 4")
tmp65 = tl.load(in_ptr2 + (tmp63), None, eviction_policy='evict_last')
tmp66 = tmp65 + tmp6
tmp67 = tmp65 < 0
tmp68 = tl.where(tmp67, tmp66, tmp65)
tl.device_assert((0 <= tmp68) & (tmp68 < 4), "index out of bounds: 0 <= tmp68 < 4")
tmp70 = tl.load(in_ptr3 + (r0 + (16*tmp68) + (64*r1)), None)
tmp71 = tmp70 - tmp17
tmp72 = tmp58 - tmp71
tmp73 = tmp55 + tmp72
tmp74 = tl.broadcast_to(tmp73, [XBLOCK, RBLOCK])
tmp76 = tl.sum(tmp74, 1)[:, None]
tmp77 = 64.0
tmp78 = tmp76 / tmp77
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp78, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [random_indices], Original ATen: [aten.randperm]
buf0 = torch.ops.aten.randperm.default(4, device=device(type='cuda', index=0), pin_memory=False)
buf1 = buf0
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16)
# Topologically Sorted Source Nodes: [y_true_shuffled, sort], Original ATen: [aten.index, aten.sort]
stream0 = get_raw_stream(0)
triton_per_fused_index_sort_0.run(buf1, arg1_1, buf3, 64, 4, grid=grid(64), stream=stream0)
del arg1_1
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_true_shuffled, sort, y_pred_shuffled, preds_sorted_by_true, max_1, preds_sorted_by_true_minus_max, exp, flip, cumsum], Original ATen: [aten.index, aten.sort, aten.gather, aten.max, aten.sub, aten.exp, aten.flip, aten.cumsum]
triton_per_fused_cumsum_exp_flip_gather_index_max_sort_sub_1.run(buf3, buf1, arg0_1, buf4, buf5, 64, 4, grid=grid(64), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse
buf7 = empty_strided_cuda((), (), torch.float32)
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [y_true_shuffled, sort, y_pred_shuffled, preds_sorted_by_true, preds_sorted_by_true_minus_max, cumsums, add, log, observation_loss, sum_1, mean], Original ATen: [aten.index, aten.sort, aten.gather, aten.sub, aten.flip, aten.add, aten.log, aten.sum, aten.mean]
triton_per_fused_add_flip_gather_index_log_mean_sort_sub_sum_2.run(buf6, buf8, buf5, buf3, buf1, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del buf1
del buf3
del buf5
del buf6
return (buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ListMLELoss(nn.Module):
def __init__(self):
super(ListMLELoss, self).__init__()
def forward(self, y_pred, y_true, eps=1e-05, padded_value_indicator=-1):
"""
ListMLE loss introduced in "Listwise Approach to Learning to Rank - Theory and Algorithm".
:param y_pred: predictions from the model, shape [batch_size, slate_length]
:param y_true: ground truth labels, shape [batch_size, slate_length]
:param eps: epsilon value, used for numerical stability
        :param padded_value_indicator: an indicator of the y_true index containing a padded item, e.g. -1 (unused in this implementation)
:return: loss value, a torch.Tensor
"""
random_indices = torch.randperm(y_pred.shape[-1])
y_pred_shuffled = y_pred[:, random_indices]
y_true_shuffled = y_true[:, random_indices]
_y_true_sorted, indices = y_true_shuffled.sort(descending=True, dim=-1)
preds_sorted_by_true = torch.gather(y_pred_shuffled, dim=1, index=
indices)
max_pred_values, _ = preds_sorted_by_true.max(dim=1, keepdim=True)
preds_sorted_by_true_minus_max = preds_sorted_by_true - max_pred_values
cumsums = torch.cumsum(preds_sorted_by_true_minus_max.exp().flip(
dims=[1]), dim=1).flip(dims=[1])
observation_loss = torch.log(cumsums + eps
) - preds_sorted_by_true_minus_max
return torch.mean(torch.sum(observation_loss, dim=1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
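# --- Worked sketch (editor's illustration, not part of the original repo) ---
# With scores sorted by descending relevance, s_1, ..., s_n, ListMLE is the
# Plackett-Luce negative log-likelihood
#     loss = sum_i [ log(sum_{j >= i} exp(s_j)) - s_i ],
# which is what the max-shifted exp / flipped-cumsum / log pipeline above
# computes (up to eps). The loss is usually fed 2D [batch, slate] inputs,
# where the dim=1 gather and the dim=-1 sort coincide:
def _listmle_demo():
    loss_fn = ListMLELoss()
    y_pred = torch.tensor([[2.0, 0.5, 1.0]])
    y_true = torch.tensor([[3.0, 1.0, 2.0]])
    return loss_fn(y_pred, y_true)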
| import torch
from torch import device
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
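# Kernel 0: applies the random permutation to y_true and sorts each slate
# in descending order, emitting the int16 sort indices.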
@triton.jit
def triton_per_fused_index_sort_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x1 = xindex // 4 % 4
r3 = rindex
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (r3 + 4 * x0 + 16 * tmp4 + 64 * x2), xmask,
other=0.0)
tmp7 = r3
tmp8 = tmp7.to(tl.int16)
tmp9 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp10 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
_tmp11, tmp12 = triton_helpers.sort_with_index(tmp9, tmp10, None, 1,
stable=False, descending=True)
tl.store(out_ptr0 + (r3 + 4 * x4), tmp12, xmask)
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
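# Kernel 1: gathers y_pred through the permutation and sort indices, takes
# the per-slate max, and runs an associative scan of exp(pred - max) over
# the flipped slate (the reversed cumulative sum).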
@triton.jit
def triton_per_fused_cumsum_exp_flip_gather_index_max_sort_sub_1(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr
):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x3 = xindex
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp26 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp39 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp52 = tl.load(in_ptr0 + (48 + x0 + -16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([XBLOCK, 1], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask,
'index out of bounds: 0 <= tmp5 < 4')
tmp7 = tl.load(in_ptr1 + tmp5, xmask, eviction_policy='evict_last')
tmp8 = tmp7 + tmp2
tmp9 = tmp7 < 0
tmp10 = tl.where(tmp9, tmp8, tmp7)
tl.device_assert((0 <= tmp10) & (tmp10 < 4) | ~xmask,
'index out of bounds: 0 <= tmp10 < 4')
tmp12 = tl.load(in_ptr2 + (x0 + 16 * tmp10 + 64 * x1), xmask)
tmp14 = tmp13.to(tl.int64)
tmp15 = tmp14 + tmp2
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tl.device_assert((0 <= tmp17) & (tmp17 < 4) | ~xmask,
'index out of bounds: 0 <= tmp17 < 4')
tmp19 = tl.load(in_ptr1 + tmp17, xmask, eviction_policy='evict_last')
tmp20 = tmp19 + tmp2
tmp21 = tmp19 < 0
tmp22 = tl.where(tmp21, tmp20, tmp19)
tl.device_assert((0 <= tmp22) & (tmp22 < 4) | ~xmask,
'index out of bounds: 0 <= tmp22 < 4')
tmp24 = tl.load(in_ptr2 + (x0 + 16 * tmp22 + 64 * x1), xmask)
tmp25 = triton_helpers.maximum(tmp12, tmp24)
tmp27 = tmp26.to(tl.int64)
tmp28 = tmp27 + tmp2
tmp29 = tmp27 < 0
tmp30 = tl.where(tmp29, tmp28, tmp27)
tl.device_assert((0 <= tmp30) & (tmp30 < 4) | ~xmask,
'index out of bounds: 0 <= tmp30 < 4')
tmp32 = tl.load(in_ptr1 + tmp30, xmask, eviction_policy='evict_last')
tmp33 = tmp32 + tmp2
tmp34 = tmp32 < 0
tmp35 = tl.where(tmp34, tmp33, tmp32)
tl.device_assert((0 <= tmp35) & (tmp35 < 4) | ~xmask,
'index out of bounds: 0 <= tmp35 < 4')
tmp37 = tl.load(in_ptr2 + (x0 + 16 * tmp35 + 64 * x1), xmask)
tmp38 = triton_helpers.maximum(tmp25, tmp37)
tmp40 = tmp39.to(tl.int64)
tmp41 = tmp40 + tmp2
tmp42 = tmp40 < 0
tmp43 = tl.where(tmp42, tmp41, tmp40)
tl.device_assert((0 <= tmp43) & (tmp43 < 4) | ~xmask,
'index out of bounds: 0 <= tmp43 < 4')
tmp45 = tl.load(in_ptr1 + tmp43, xmask, eviction_policy='evict_last')
tmp46 = tmp45 + tmp2
tmp47 = tmp45 < 0
tmp48 = tl.where(tmp47, tmp46, tmp45)
tl.device_assert((0 <= tmp48) & (tmp48 < 4) | ~xmask,
'index out of bounds: 0 <= tmp48 < 4')
tmp50 = tl.load(in_ptr2 + (x0 + 16 * tmp48 + 64 * x1), xmask)
tmp51 = triton_helpers.maximum(tmp38, tmp50)
tmp53 = tmp52.to(tl.int64)
tmp54 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp55 = tmp53 + tmp54
tmp56 = tmp53 < 0
tmp57 = tl.where(tmp56, tmp55, tmp53)
tl.device_assert((0 <= tmp57) & (tmp57 < 4) | ~xmask,
'index out of bounds: 0 <= tmp57 < 4')
tmp59 = tl.load(in_ptr1 + tmp57, xmask, eviction_policy='evict_last')
tmp60 = tmp59 + tmp54
tmp61 = tmp59 < 0
tmp62 = tl.where(tmp61, tmp60, tmp59)
tl.device_assert((0 <= tmp62) & (tmp62 < 4) | ~xmask,
'index out of bounds: 0 <= tmp62 < 4')
tmp64 = tl.load(in_ptr2 + (x0 + 16 * tmp62 + 64 * x1), xmask)
tmp65 = tmp64 - tmp51
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp66.to(tl.float32)
tmp68 = tl.broadcast_to(tmp67, [XBLOCK, RBLOCK])
tmp69, = tl.associative_scan((tmp68,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + x3, tmp51, xmask)
tl.store(out_ptr1 + (x0 + 16 * r2 + 64 * x1), tmp69, xmask)
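# Kernel 2: final fused reduction -- log(cumsum + eps) - (pred - max),
# summed over each slate and averaged into the scalar loss.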
@triton.jit
def triton_per_fused_add_flip_gather_index_log_mean_sort_sub_sum_2(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK:
tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
r2 = rindex
tmp0 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp4 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp17 = tl.load(in_out_ptr0 + r2, None)
tmp20 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp23 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp38 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp41 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp56 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp59 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp1 = 1e-05
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp5 = tmp4.to(tl.int64)
tmp6 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp7 = tmp5 + tmp6
tmp8 = tmp5 < 0
tmp9 = tl.where(tmp8, tmp7, tmp5)
tl.device_assert((0 <= tmp9) & (tmp9 < 4),
'index out of bounds: 0 <= tmp9 < 4')
tmp11 = tl.load(in_ptr2 + tmp9, None, eviction_policy='evict_last')
tmp12 = tmp11 + tmp6
tmp13 = tmp11 < 0
tmp14 = tl.where(tmp13, tmp12, tmp11)
tl.device_assert((0 <= tmp14) & (tmp14 < 4),
'index out of bounds: 0 <= tmp14 < 4')
tmp16 = tl.load(in_ptr3 + (r0 + 16 * tmp14 + 64 * r1), None)
tmp18 = tmp16 - tmp17
tmp19 = tmp3 - tmp18
tmp21 = tmp20 + tmp1
tmp22 = tl_math.log(tmp21)
tmp24 = tmp23.to(tl.int64)
tmp25 = tmp24 + tmp6
tmp26 = tmp24 < 0
tmp27 = tl.where(tmp26, tmp25, tmp24)
tl.device_assert((0 <= tmp27) & (tmp27 < 4),
'index out of bounds: 0 <= tmp27 < 4')
tmp29 = tl.load(in_ptr2 + tmp27, None, eviction_policy='evict_last')
tmp30 = tmp29 + tmp6
tmp31 = tmp29 < 0
tmp32 = tl.where(tmp31, tmp30, tmp29)
tl.device_assert((0 <= tmp32) & (tmp32 < 4),
'index out of bounds: 0 <= tmp32 < 4')
tmp34 = tl.load(in_ptr3 + (r0 + 16 * tmp32 + 64 * r1), None)
tmp35 = tmp34 - tmp17
tmp36 = tmp22 - tmp35
tmp37 = tmp19 + tmp36
tmp39 = tmp38 + tmp1
tmp40 = tl_math.log(tmp39)
tmp42 = tmp41.to(tl.int64)
tmp43 = tmp42 + tmp6
tmp44 = tmp42 < 0
tmp45 = tl.where(tmp44, tmp43, tmp42)
tl.device_assert((0 <= tmp45) & (tmp45 < 4),
'index out of bounds: 0 <= tmp45 < 4')
tmp47 = tl.load(in_ptr2 + tmp45, None, eviction_policy='evict_last')
tmp48 = tmp47 + tmp6
tmp49 = tmp47 < 0
tmp50 = tl.where(tmp49, tmp48, tmp47)
tl.device_assert((0 <= tmp50) & (tmp50 < 4),
'index out of bounds: 0 <= tmp50 < 4')
tmp52 = tl.load(in_ptr3 + (r0 + 16 * tmp50 + 64 * r1), None)
tmp53 = tmp52 - tmp17
tmp54 = tmp40 - tmp53
tmp55 = tmp37 + tmp54
tmp57 = tmp56 + tmp1
tmp58 = tl_math.log(tmp57)
tmp60 = tmp59.to(tl.int64)
tmp61 = tmp60 + tmp6
tmp62 = tmp60 < 0
tmp63 = tl.where(tmp62, tmp61, tmp60)
tl.device_assert((0 <= tmp63) & (tmp63 < 4),
'index out of bounds: 0 <= tmp63 < 4')
tmp65 = tl.load(in_ptr2 + tmp63, None, eviction_policy='evict_last')
tmp66 = tmp65 + tmp6
tmp67 = tmp65 < 0
tmp68 = tl.where(tmp67, tmp66, tmp65)
tl.device_assert((0 <= tmp68) & (tmp68 < 4),
'index out of bounds: 0 <= tmp68 < 4')
tmp70 = tl.load(in_ptr3 + (r0 + 16 * tmp68 + 64 * r1), None)
tmp71 = tmp70 - tmp17
tmp72 = tmp58 - tmp71
tmp73 = tmp55 + tmp72
tmp74 = tl.broadcast_to(tmp73, [XBLOCK, RBLOCK])
tmp76 = tl.sum(tmp74, 1)[:, None]
tmp77 = 64.0
tmp78 = tmp76 / tmp77
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp78, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.randperm.default(4, device=device(type='cuda',
index=0), pin_memory=False)
buf1 = buf0
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int16)
get_raw_stream(0)
triton_per_fused_index_sort_0[grid(64)](buf1, arg1_1, buf3, 64, 4,
XBLOCK=8, num_warps=2, num_stages=1)
del arg1_1
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_cumsum_exp_flip_gather_index_max_sort_sub_1[grid(64)](
buf3, buf1, arg0_1, buf4, buf5, 64, 4, XBLOCK=8, num_warps=2,
num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
buf7 = empty_strided_cuda((), (), torch.float32)
buf8 = buf7
del buf7
triton_per_fused_add_flip_gather_index_log_mean_sort_sub_sum_2[grid(1)
](buf6, buf8, buf5, buf3, buf1, arg0_1, 1, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
del buf1
del buf3
del buf5
del buf6
return buf8,
class ListMLELossNew(nn.Module):
def __init__(self):
super(ListMLELossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Pepijnnn/MasterThesis | ListMLELoss | false | 962 | [
"MIT"
] | 0 | 7ec831f5e55f5f181e0196fa78284e2846ce2e26 | https://github.com/Pepijnnn/MasterThesis/tree/7ec831f5e55f5f181e0196fa78284e2846ce2e26 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, y_pred, y_true, eps=1e-05, padded_value_indicator=-1):
"""
ListMLE loss introduced in "Listwise Approach to Learning to Rank - Theory and Algorithm".
:param y_pred: predictions from the model, shape [batch_size, slate_length]
:param y_true: ground truth labels, shape [batch_size, slate_length]
:param eps: epsilon value, used for numerical stability
        :param padded_value_indicator: an indicator of the y_true index containing a padded item, e.g. -1 (unused in this implementation)
:return: loss value, a torch.Tensor
"""
random_indices = torch.randperm(y_pred.shape[-1])
y_pred_shuffled = y_pred[:, random_indices]
y_true_shuffled = y_true[:, random_indices]
_y_true_sorted, indices = y_true_shuffled.sort(descending=True, dim=-1)
preds_sorted_by_true = torch.gather(y_pred_shuffled, dim=1, index=
indices)
max_pred_values, _ = preds_sorted_by_true.max(dim=1, keepdim=True)
preds_sorted_by_true_minus_max = preds_sorted_by_true - max_pred_values
cumsums = torch.cumsum(preds_sorted_by_true_minus_max.exp().flip(
dims=[1]), dim=1).flip(dims=[1])
observation_loss = torch.log(cumsums + eps
) - preds_sorted_by_true_minus_max
return torch.mean(torch.sum(observation_loss, dim=1))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/s7/cs7jv7spsytbq3ouvdhla2tcr7wzgoznysid6m7rapuqn7g7cc3h.py
# Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# intersection => mul
# sum_1 => sum_1
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
tl.store(out_ptr2 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/vq/cvqiixp4wmb73ig2cla6idbqq7i6vd5n3qmdluadrv32f52pdgw3.py
# Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, loss, sum_4, truediv_1, loss_1], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# loss => div
# loss_1 => sub
# mul_1 => mul_1
# sum_4 => sum_4
# truediv_1 => div_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp6 = tl.load(in_ptr2 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp1 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp15, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf2, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, loss, sum_4, truediv_1, loss_1], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub]
triton_per_fused_add_div_mul_rsub_sum_1.run(buf4, buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, input, target):
N = target.size(0)
smooth = 1
input_flat = input.view(N, -1)
target_flat = target.view(N, -1)
intersection = input_flat * target_flat
loss = 2 * (intersection.sum(1) + smooth) / (input_flat.sum(1) +
target_flat.sum(1) + smooth)
loss = 1 - loss.sum() / N
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
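# --- Worked sketch (editor's illustration, not part of the original repo) ---
# Per sample, the soft Dice score with smooth = 1 as implemented above is
#     dice = 2 * (I + 1) / (A + B + 1),
# where I is the intersection sum and A, B the per-sample sums, and the loss
# is 1 - mean(dice). Note the smooth term is doubled in the numerator, so a
# perfect overlap yields a slightly negative loss of -1 / (2 * S + 1) per
# sample rather than exactly 0:
def _dice_demo():
    loss_fn = DiceLoss()
    mask = (torch.rand(4, 4, 4, 4) > 0.5).float()
    loss = loss_fn(mask, mask)
    assert loss.item() < 0.0  # identical masks: dice > 1, so loss dips below 0
    return loss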
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
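# Kernel 0: per-sample reduction -- sums input*target (the intersection),
# input, and target over the 64 flattened elements of each of the 4 samples.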
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
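# Kernel 1: combines the three per-sample sums into the scalar loss
# 1 - mean(2 * (I + 1) / (A + B + 1)).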
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp1 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg1_1, arg0_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
class DiceLossNew(nn.Module):
def __init__(self):
super(DiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Qsingle/MedicalImage | DiceLoss | false | 963 | [
"MIT"
] | 0 | a5020d7d2266669a4d6ffec224430e8b25cc1dfc | https://github.com/Qsingle/MedicalImage/tree/a5020d7d2266669a4d6ffec224430e8b25cc1dfc | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input, target):
N = target.size(0)
smooth = 1
input_flat = input.view(N, -1)
target_flat = target.view(N, -1)
intersection = input_flat * target_flat
loss = 2 * (intersection.sum(1) + smooth) / (input_flat.sum(1) +
target_flat.sum(1) + smooth)
loss = 1 - loss.sum() / N
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
DummyLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/tx/ctxchhellli4zhqwcsl42vuibttmcqoaf36rxpduozn6jvadgnyt.py
# Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
# Source node to ATen node mapping:
# add => add
# sub => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %primals_1), kwargs = {})
triton_poi_fused_add_sub_0 = async_compile.triton('triton_poi_fused_add_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tmp3 - tmp2
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_sub_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class DummyLayer(torch.nn.Module):
def __init__(self):
super().__init__()
self.dummy = torch.nn.Parameter(torch.ones(1, dtype=torch.float32))
def forward(self, x):
return x + self.dummy - self.dummy
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
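# Hedged note (illustrative addition): "+ dummy - dummy" is an identity on the
# values but keeps the parameter in the autograd graph, which is why Inductor
# fuses the whole forward into a single load/add/sub/store kernel.
if __name__ == "__main__":
    layer = DummyLayer()
    x = torch.rand(4, 4, 4, 4)
    # (x + 1) - 1 differs from x by at most one float32 rounding of x + 1
    print((layer(x) - x).abs().max())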
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tmp3 - tmp2
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_sub_0[grid(256)](primals_2, primals_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class DummyLayerNew(torch.nn.Module):
def __init__(self):
super().__init__()
self.dummy = torch.nn.Parameter(torch.ones(1, dtype=torch.float32))
def forward(self, input_0):
primals_1 = self.dummy
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| RICE-EIC/Early-Bird-GCN | DummyLayer | false | 964 | [
"Apache-2.0"
] | 0 | 25a80b23f2ecfc46ffe00b1cf0e06052b32aad0f | https://github.com/RICE-EIC/Early-Bird-GCN/tree/25a80b23f2ecfc46ffe00b1cf0e06052b32aad0f | import torch
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.dummy = torch.nn.Parameter(torch.ones(1, dtype=torch.float32))
def forward(self, x):
return x + self.dummy - self.dummy
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/qg/cqgejm4wp2tvoab2fpggn6ygzxekamtcj65undyfrstdf2jttwb4.py
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, dice, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# dice => div
# intersection => sum_1
# mul => mul
# mul_1 => mul_1
# sub => sub
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1.0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = tmp14 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, dice, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DiceLoss(nn.Module):
"""
Criterion that computes Sørensen-Dice Coefficient loss.
https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
"""
def __init__(self):
super().__init__()
self.smooth = 1.0
def forward(self, input, target):
input = input.view(-1)
target = target.view(-1)
intersection = (input * target).sum()
        dice = (2.0 * intersection + self.smooth) / (input.sum() +
            target.sum() + self.smooth)
return 1 - dice
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = tmp14 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
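# Hedged reading (comment added for clarity): unlike the per-sample DiceLoss
# above, this kernel flattens all 256 elements into one persistent reduction;
# a single program instance produces the three sums and writes the scalar
# 1 - (2*I + 1) / (P + T + 1) in place.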
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(nn.Module):
"""
Criterion that computes Sørensen-Dice Coefficient loss.
https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
"""
def __init__(self):
super().__init__()
self.smooth = 1.0
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Quentin18/road-segmentation | DiceLoss | false | 965 | [
"MIT"
] | 0 | 9d212c80fa3f6926c431847337d2ca38ec96b614 | https://github.com/Quentin18/road-segmentation/tree/9d212c80fa3f6926c431847337d2ca38ec96b614 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Criterion that computes Sørensen-Dice Coefficient loss.
https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
"""
def __init__(self):
super().__init__()
self.smooth = 1.0
def forward(self, input, target):
input = input.view(-1)
target = target.view(-1)
intersection = (input * target).sum()
        dice = (2.0 * intersection + self.smooth) / (input.sum() +
            target.sum() + self.smooth)
return 1 - dice
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
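# Hedged sketch (illustrative addition): this variant flattens the whole batch
# before computing Dice, so it matches a per-sample Dice average only when the
# batch size is 1.
if __name__ == "__main__":
    model = Model()
    print(float(model(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))))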
|
ShearX | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/64/c64wcolc5y2n37sxtnpnhr2dzw4gq7giy4xyi4ub5z3kplhqckpr.py
# Topologically Sorted Source Nodes: [fill_], Original ATen: [aten.fill]
# Source node to ATen node mapping:
# fill_ => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %select_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %full_default, 3, 2), kwargs = {})
triton_poi_fused_fill_0 = async_compile.triton('triton_poi_fused_fill_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x2 = (xindex // 12)
x1 = (xindex // 3) % 4
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 1, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = x2
tmp6 = tmp5.to(tl.float32)
tmp7 = 2.0
tmp8 = tmp6 < tmp7
tmp9 = 1.0
tmp10 = tmp6 * tmp9
tmp11 = -1.5
tmp12 = tmp10 + tmp11
tmp13 = 3 + ((-1)*x2)
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 * tmp9
tmp16 = 1.5
tmp17 = tmp16 - tmp15
tmp18 = tl.where(tmp8, tmp12, tmp17)
tmp19 = tl.full([1], 0, tl.int32)
tmp20 = tmp0 == tmp19
tmp21 = x1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp22 < tmp7
tmp24 = tmp22 * tmp9
tmp25 = tmp24 + tmp11
tmp26 = 3 + ((-1)*x1)
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp27 * tmp9
tmp29 = tmp16 - tmp28
tmp30 = tl.where(tmp23, tmp25, tmp29)
tmp31 = float("nan")
tmp32 = tl.where(tmp20, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp18, tmp32)
tmp34 = tl.where(tmp2, tmp9, tmp33)
tl.store(out_ptr0 + (x4), tmp34, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/6x/c6x27w4kh5rblyswcsrq6rtjtm6kthxuhgphvfnzntdp5j3sz3yt.py
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta], Original ATen: [aten.lift_fresh, aten.div]
# Source node to ATen node mapping:
# rescaled_theta => div
# tensor_1 => lift_fresh_copy_1
# Graph fragment:
# %lift_fresh_copy_1 : [num_users=1] = call_function[target=torch.ops.aten.lift_fresh_copy.default](args = (%_tensor_constant1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute, %lift_fresh_copy_1), kwargs = {})
triton_poi_fused_div_lift_fresh_1 = async_compile.triton('triton_poi_fused_div_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_lift_fresh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_lift_fresh_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x1 + (3*x0)
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = -0.737263560295105
tmp8 = 0.0
tmp9 = tl.where(tmp6, tmp7, tmp8)
tmp10 = 1.0
tmp11 = tl.where(tmp4, tmp10, tmp9)
tmp12 = tl.full([1], 4, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.full([1], 5, tl.int64)
tmp15 = tmp0 < tmp14
tmp16 = tl.where(tmp15, tmp10, tmp8)
tmp17 = -0.0
tmp18 = tl.where(tmp13, tmp17, tmp16)
tmp19 = tl.where(tmp2, tmp11, tmp18)
tmp20 = x0
tmp21 = tmp20 < tmp3
tmp22 = 2.0
tmp23 = tl.where(tmp21, tmp22, tmp22)
tmp24 = tmp19 / tmp23
tl.store(out_ptr0 + (x2), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/zf/czfnj57ejqlqojplqt6piz7sosofcvzqdxhskwatgn32njr7acz7.py
# Topologically Sorted Source Nodes: [img], Original ATen: [aten.grid_sampler_2d]
# Source node to ATen node mapping:
# img => add_2, add_3, full_default_3, full_default_4, ge, ge_1, index, logical_and, logical_and_1, logical_and_2, lt_2, lt_3, mul_4, mul_5, mul_6, round_1, round_2, where_4
# Graph fragment:
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_8, 2.0), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, 1.5), kwargs = {})
# %round_1 : [num_users=3] = call_function[target=torch.ops.aten.round.default](args = (%add_2,), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%round_1, 0), kwargs = {})
# %lt_2 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%round_1, 4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_9, 2.0), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 1.5), kwargs = {})
# %round_2 : [num_users=3] = call_function[target=torch.ops.aten.round.default](args = (%add_3,), kwargs = {})
# %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%round_2, 0), kwargs = {})
# %lt_3 : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%round_2, 4), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge_1, %lt_3), kwargs = {})
# %logical_and_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%lt_2, %logical_and), kwargs = {})
# %logical_and_2 : [num_users=3] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %logical_and_1), kwargs = {})
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [%view_5, %view_6, %where_3, %where_2]), kwargs = {})
# %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%logical_and_2, %full_default_4, %full_default_3), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %where_4), kwargs = {})
triton_poi_fused_grid_sampler_2d_2 = async_compile.triton('triton_poi_fused_grid_sampler_2d_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_grid_sampler_2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_grid_sampler_2d_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (2*x0)), xmask, eviction_policy='evict_last')
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.5
tmp4 = tmp2 + tmp3
tmp5 = libdevice.nearbyint(tmp4)
tmp6 = 0.0
tmp7 = tmp5 >= tmp6
tmp8 = 4.0
tmp9 = tmp5 < tmp8
tmp11 = tmp10 * tmp1
tmp12 = tmp11 + tmp3
tmp13 = libdevice.nearbyint(tmp12)
tmp14 = tmp13 >= tmp6
tmp15 = tmp13 < tmp8
tmp16 = tmp14 & tmp15
tmp17 = tmp9 & tmp16
tmp18 = tmp7 & tmp17
tmp19 = tmp13.to(tl.int64)
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.where(tmp18, tmp19, tmp20)
tmp22 = tl.full([XBLOCK], 4, tl.int32)
tmp23 = tmp21 + tmp22
tmp24 = tmp21 < 0
tmp25 = tl.where(tmp24, tmp23, tmp21)
tl.device_assert(((0 <= tmp25) & (tmp25 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp25 < 4")
tmp27 = tmp5.to(tl.int64)
tmp28 = tl.where(tmp18, tmp27, tmp20)
tmp29 = tmp28 + tmp22
tmp30 = tmp28 < 0
tmp31 = tl.where(tmp30, tmp29, tmp28)
tl.device_assert(((0 <= tmp31) & (tmp31 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp31 < 4")
tmp33 = tl.load(in_ptr1 + (tmp31 + (4*tmp25) + (16*x3)), xmask, eviction_policy='evict_last')
tmp34 = tl.full([1], 1, tl.int64)
tmp35 = tl.where(tmp18, tmp34, tmp20)
tmp36 = tmp35.to(tl.float32)
tmp37 = tmp33 * tmp36
tl.store(out_ptr0 + (x4), tmp37, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((1, 4, 4, 3), (48, 12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [fill_], Original ATen: [aten.fill]
stream0 = get_raw_stream(0)
triton_poi_fused_fill_0.run(buf2, 48, grid=grid(48), stream=stream0)
buf3 = empty_strided_cuda((1, 3, 2), (6, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta], Original ATen: [aten.lift_fresh, aten.div]
triton_poi_fused_div_lift_fresh_1.run(buf3, 6, grid=grid(6), stream=stream0)
buf4 = empty_strided_cuda((1, 16, 2), (32, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensor_1, rescaled_theta, output_grid], Original ATen: [aten.lift_fresh, aten.div, aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 3), (48, 3, 1), 0), buf3, out=buf4)
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [img], Original ATen: [aten.grid_sampler_2d]
triton_poi_fused_grid_sampler_2d_2.run(buf4, arg0_1, buf5, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf4
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torchvision import transforms as ttf
class ShearX(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M - 180
def forward(self, img):
return ttf.functional.affine(img, 0, [0, 0], 1, [self.angle, 0])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'M': 4}]
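# Hedged check (illustrative addition): with M = 4 the shear angle is
# 359 / 10 * 4 - 180 = -36.4 degrees, and tan(-36.4 deg) ~ -0.7372636 -- the
# shear coefficient baked into the compiled affine kernels above.
if __name__ == "__main__":
    import math
    print(math.tan(math.radians(359 / 10 * 4 - 180)))  # ~ -0.737263...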
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_fill_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x2 = xindex // 12
x1 = xindex // 3 % 4
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 1, tl.int32)
tmp4 = tmp0 == tmp3
tmp5 = x2
tmp6 = tmp5.to(tl.float32)
tmp7 = 2.0
tmp8 = tmp6 < tmp7
tmp9 = 1.0
tmp10 = tmp6 * tmp9
tmp11 = -1.5
tmp12 = tmp10 + tmp11
tmp13 = 3 + -1 * x2
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 * tmp9
tmp16 = 1.5
tmp17 = tmp16 - tmp15
tmp18 = tl.where(tmp8, tmp12, tmp17)
tmp19 = tl.full([1], 0, tl.int32)
tmp20 = tmp0 == tmp19
tmp21 = x1
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp22 < tmp7
tmp24 = tmp22 * tmp9
tmp25 = tmp24 + tmp11
tmp26 = 3 + -1 * x1
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp27 * tmp9
tmp29 = tmp16 - tmp28
tmp30 = tl.where(tmp23, tmp25, tmp29)
tmp31 = float('nan')
tmp32 = tl.where(tmp20, tmp30, tmp31)
tmp33 = tl.where(tmp4, tmp18, tmp32)
tmp34 = tl.where(tmp2, tmp9, tmp33)
tl.store(out_ptr0 + x4, tmp34, xmask)
@triton.jit
def triton_poi_fused_div_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x1 + 3 * x0
tmp1 = tl.full([1], 3, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 2, tl.int64)
tmp6 = tmp0 < tmp5
tmp7 = -0.737263560295105
tmp8 = 0.0
tmp9 = tl.where(tmp6, tmp7, tmp8)
tmp10 = 1.0
tmp11 = tl.where(tmp4, tmp10, tmp9)
tmp12 = tl.full([1], 4, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.full([1], 5, tl.int64)
tmp15 = tmp0 < tmp14
tmp16 = tl.where(tmp15, tmp10, tmp8)
tmp17 = -0.0
tmp18 = tl.where(tmp13, tmp17, tmp16)
tmp19 = tl.where(tmp2, tmp11, tmp18)
tmp20 = x0
tmp21 = tmp20 < tmp3
tmp22 = 2.0
tmp23 = tl.where(tmp21, tmp22, tmp22)
tmp24 = tmp19 / tmp23
tl.store(out_ptr0 + x2, tmp24, xmask)
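# Hedged reading (comment added for clarity): the kernel below is a
# nearest-neighbour grid_sampler_2d with zero padding -- it un-normalizes each
# grid coordinate via x * 2.0 + 1.5, rounds with nearbyint, bounds-checks
# against [0, 4), and zeroes out-of-range samples by multiplying with a 0/1
# validity mask.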
@triton.jit
def triton_poi_fused_grid_sampler_2d_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x3 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + 2 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 2 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1.5
tmp4 = tmp2 + tmp3
tmp5 = libdevice.nearbyint(tmp4)
tmp6 = 0.0
tmp7 = tmp5 >= tmp6
tmp8 = 4.0
tmp9 = tmp5 < tmp8
tmp11 = tmp10 * tmp1
tmp12 = tmp11 + tmp3
tmp13 = libdevice.nearbyint(tmp12)
tmp14 = tmp13 >= tmp6
tmp15 = tmp13 < tmp8
tmp16 = tmp14 & tmp15
tmp17 = tmp9 & tmp16
tmp18 = tmp7 & tmp17
tmp19 = tmp13.to(tl.int64)
tmp20 = tl.full([1], 0, tl.int64)
tmp21 = tl.where(tmp18, tmp19, tmp20)
tmp22 = tl.full([XBLOCK], 4, tl.int32)
tmp23 = tmp21 + tmp22
tmp24 = tmp21 < 0
tmp25 = tl.where(tmp24, tmp23, tmp21)
tl.device_assert((0 <= tmp25) & (tmp25 < 4) | ~xmask,
'index out of bounds: 0 <= tmp25 < 4')
tmp27 = tmp5.to(tl.int64)
tmp28 = tl.where(tmp18, tmp27, tmp20)
tmp29 = tmp28 + tmp22
tmp30 = tmp28 < 0
tmp31 = tl.where(tmp30, tmp29, tmp28)
tl.device_assert((0 <= tmp31) & (tmp31 < 4) | ~xmask,
'index out of bounds: 0 <= tmp31 < 4')
tmp33 = tl.load(in_ptr1 + (tmp31 + 4 * tmp25 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp34 = tl.full([1], 1, tl.int64)
tmp35 = tl.where(tmp18, tmp34, tmp20)
tmp36 = tmp35.to(tl.float32)
tmp37 = tmp33 * tmp36
tl.store(out_ptr0 + x4, tmp37, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((1, 4, 4, 3), (48, 12, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_fill_0[grid(48)](buf2, 48, XBLOCK=64, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((1, 3, 2), (6, 2, 1), torch.float32)
triton_poi_fused_div_lift_fresh_1[grid(6)](buf3, 6, XBLOCK=8,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((1, 16, 2), (32, 2, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (1, 16, 3), (48, 3, 1),
0), buf3, out=buf4)
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_grid_sampler_2d_2[grid(256)](buf4, arg0_1, buf5,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf4
return buf5,
class ShearXNew(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M - 180
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Hayoung93/UDA | ShearX | false | 966 | [
"Apache-2.0"
] | 0 | a587b01c76141d64e7cead55b62e0f3ed75890bf | https://github.com/Hayoung93/UDA/tree/a587b01c76141d64e7cead55b62e0f3ed75890bf | import torch
import torch.nn as nn
from torchvision import transforms as ttf
class Model(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M - 180
def forward(self, img):
return ttf.functional.affine(img, 0, [0, 0], 1, [self.angle, 0])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Convolution | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ud/cudtupp4xbsxvl5czwt3p2pj3cknjnhtp6x45zymsucnyg3xzdnf.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%arg1_1, %arg0_1, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(arg1_1, buf0, 16, 16, grid=grid(16, 16), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(arg0_1, buf1, 16, 16, grid=grid(16, 16), stream=stream0)
del arg0_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 4, 4))
del buf0
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as fn
from torch.nn.parameter import Parameter
import torch.nn
def to_pair(data):
"""Converts a single or a tuple of data into a pair. If the data is a tuple with more than two elements, it selects
    the first two of them. If the data is a single value, it duplicates that value into a pair.
Args:
data (object or tuple): The input data.
Returns:
Tuple: A pair of data.
"""
if isinstance(data, tuple):
return data[0:2]
return data, data
class Convolution(nn.Module):
"""Performs a 2D convolution over an input spike-wave composed of several input
planes. Current version only supports stride of 1 with no padding.
    The input is a 4D tensor with the size :math:`(T, C_{{in}}, H_{{in}}, W_{{in}})` and the corresponding output
is of size :math:`(T, C_{{out}}, H_{{out}}, W_{{out}})`,
where :math:`T` is the number of time steps, :math:`C` is the number of feature maps (channels), and
    :math:`H` and :math:`W` are the height and width of the input/output planes.
* :attr:`in_channels` controls the number of input planes (channels/feature maps).
* :attr:`out_channels` controls the number of feature maps in the current layer.
* :attr:`kernel_size` controls the size of the convolution kernel. It can be a single integer or a tuple of two integers.
* :attr:`weight_mean` controls the mean of the normal distribution used for initial random weights.
* :attr:`weight_std` controls the standard deviation of the normal distribution used for initial random weights.
.. note::
    Since this version of convolution does not support padding, it is the user's responsibility to add proper padding
on the input before applying convolution.
Args:
in_channels (int): Number of channels in the input.
out_channels (int): Number of channels produced by the convolution.
kernel_size (int or tuple): Size of the convolving kernel.
weight_mean (float, optional): Mean of the initial random weights. Default: 0.8
weight_std (float, optional): Standard deviation of the initial random weights. Default: 0.02
"""
def __init__(self, in_channels, out_channels, kernel_size, weight_mean=
0.8, weight_std=0.02):
super(Convolution, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = to_pair(kernel_size)
self.stride = 1
self.bias = None
self.dilation = 1
self.groups = 1
self.padding = 0
self.weight = Parameter(torch.Tensor(self.out_channels, self.
in_channels, *self.kernel_size))
self.weight.requires_grad_(False)
self.reset_weight(weight_mean, weight_std)
def reset_weight(self, weight_mean=0.8, weight_std=0.02):
"""Resets weights to random values based on a normal distribution.
Args:
weight_mean (float, optional): Mean of the random weights. Default: 0.8
weight_std (float, optional): Standard deviation of the random weights. Default: 0.02
"""
self.weight.normal_(weight_mean, weight_std)
def load_weight(self, target):
"""Loads weights with the target tensor.
Args:
            target (Tensor): The target tensor.
"""
self.weight.copy_(target)
def forward(self, input):
return fn.conv2d(input, self.weight, self.bias, self.stride, self.
padding, self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
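# Hedged usage sketch (illustrative addition): with 4x4 inputs, a 4x4 kernel,
# stride 1 and no padding, the output spatial size is 4 - 4 + 1 = 1, matching
# the (4, 4, 1, 1) assert in the compiled call() above.
if __name__ == "__main__":
    conv = Convolution(in_channels=4, out_channels=4, kernel_size=4)
    print(conv(torch.rand(4, 4, 4, 4)).shape)  # torch.Size([4, 4, 1, 1])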
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
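# Hedged reading (comment added for clarity): the kernel below only permutes
# memory layout -- it copies each (4, 4, 4, 4) tensor from contiguous strides
# (64, 16, 4, 1) to the channels-last-style strides (64, 1, 16, 4) expected by
# the extern convolution; the convolution itself runs in
# extern_kernels.convolution.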
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 16)](arg1_1, buf0, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_convolution_0[grid(16, 16)](arg0_1, buf1, 16, 16,
XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del arg0_1
buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 4, 4))
del buf0
del buf1
return buf2,
def to_pair(data):
"""Converts a single or a tuple of data into a pair. If the data is a tuple with more than two elements, it selects
    the first two of them. If the data is a single value, it duplicates that value into a pair.
Args:
data (object or tuple): The input data.
Returns:
Tuple: A pair of data.
"""
if isinstance(data, tuple):
return data[0:2]
return data, data
class ConvolutionNew(nn.Module):
"""Performs a 2D convolution over an input spike-wave composed of several input
planes. Current version only supports stride of 1 with no padding.
    The input is a 4D tensor with the size :math:`(T, C_{{in}}, H_{{in}}, W_{{in}})` and the corresponding output
is of size :math:`(T, C_{{out}}, H_{{out}}, W_{{out}})`,
where :math:`T` is the number of time steps, :math:`C` is the number of feature maps (channels), and
    :math:`H` and :math:`W` are the height and width of the input/output planes.
* :attr:`in_channels` controls the number of input planes (channels/feature maps).
* :attr:`out_channels` controls the number of feature maps in the current layer.
* :attr:`kernel_size` controls the size of the convolution kernel. It can be a single integer or a tuple of two integers.
* :attr:`weight_mean` controls the mean of the normal distribution used for initial random weights.
* :attr:`weight_std` controls the standard deviation of the normal distribution used for initial random weights.
.. note::
    Since this version of convolution does not support padding, it is the user's responsibility to add proper padding
on the input before applying convolution.
Args:
in_channels (int): Number of channels in the input.
out_channels (int): Number of channels produced by the convolution.
kernel_size (int or tuple): Size of the convolving kernel.
weight_mean (float, optional): Mean of the initial random weights. Default: 0.8
weight_std (float, optional): Standard deviation of the initial random weights. Default: 0.02
"""
def __init__(self, in_channels, out_channels, kernel_size, weight_mean=
0.8, weight_std=0.02):
super(ConvolutionNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = to_pair(kernel_size)
self.stride = 1
self.bias = None
self.dilation = 1
self.groups = 1
self.padding = 0
self.weight = Parameter(torch.Tensor(self.out_channels, self.
in_channels, *self.kernel_size))
self.weight.requires_grad_(False)
self.reset_weight(weight_mean, weight_std)
def reset_weight(self, weight_mean=0.8, weight_std=0.02):
"""Resets weights to random values based on a normal distribution.
Args:
weight_mean (float, optional): Mean of the random weights. Default: 0.8
weight_std (float, optional): Standard deviation of the random weights. Default: 0.02
"""
self.weight.normal_(weight_mean, weight_std)
def load_weight(self, target):
"""Loads weights with the target tensor.
Args:
            target (Tensor): The target tensor.
"""
self.weight.copy_(target)
def forward(self, input_0):
arg0_1 = self.weight
arg1_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
| R1704/SpeechRecognitionSNN | Convolution | false | 967 | [
"MIT"
] | 0 | 4b788d1bd20d8ce201da6da8b200b3ca722c7efa | https://github.com/R1704/SpeechRecognitionSNN/tree/4b788d1bd20d8ce201da6da8b200b3ca722c7efa | import torch
import torch.nn as nn
import torch.nn.functional as fn
from torch.nn.parameter import Parameter
import torch.nn
def to_pair(data):
"""Converts a single or a tuple of data into a pair. If the data is a tuple with more than two elements, it selects
    the first two of them. If the data is a single value, it duplicates that value into a pair.
Args:
data (object or tuple): The input data.
Returns:
Tuple: A pair of data.
"""
if isinstance(data, tuple):
return data[0:2]
return data, data
class Model(nn.Module):
"""Performs a 2D convolution over an input spike-wave composed of several input
planes. Current version only supports stride of 1 with no padding.
    The input is a 4D tensor with the size :math:`(T, C_{{in}}, H_{{in}}, W_{{in}})` and the corresponding output
is of size :math:`(T, C_{{out}}, H_{{out}}, W_{{out}})`,
where :math:`T` is the number of time steps, :math:`C` is the number of feature maps (channels), and
    :math:`H` and :math:`W` are the height and width of the input/output planes.
* :attr:`in_channels` controls the number of input planes (channels/feature maps).
* :attr:`out_channels` controls the number of feature maps in the current layer.
* :attr:`kernel_size` controls the size of the convolution kernel. It can be a single integer or a tuple of two integers.
* :attr:`weight_mean` controls the mean of the normal distribution used for initial random weights.
* :attr:`weight_std` controls the standard deviation of the normal distribution used for initial random weights.
.. note::
    Since this version of convolution does not support padding, it is the user's responsibility to add proper padding
on the input before applying convolution.
Args:
in_channels (int): Number of channels in the input.
out_channels (int): Number of channels produced by the convolution.
kernel_size (int or tuple): Size of the convolving kernel.
weight_mean (float, optional): Mean of the initial random weights. Default: 0.8
weight_std (float, optional): Standard deviation of the initial random weights. Default: 0.02
"""
def __init__(self, in_channels, out_channels, kernel_size, weight_mean=
0.8, weight_std=0.02):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = to_pair(kernel_size)
self.stride = 1
self.bias = None
self.dilation = 1
self.groups = 1
self.padding = 0
self.weight = Parameter(torch.Tensor(self.out_channels, self.
in_channels, *self.kernel_size))
self.weight.requires_grad_(False)
self.reset_weight(weight_mean, weight_std)
def reset_weight(self, weight_mean=0.8, weight_std=0.02):
"""Resets weights to random values based on a normal distribution.
Args:
weight_mean (float, optional): Mean of the random weights. Default: 0.8
weight_std (float, optional): Standard deviation of the random weights. Default: 0.02
"""
self.weight.normal_(weight_mean, weight_std)
def load_weight(self, target):
"""Loads weights with the target tensor.
Args:
            target (Tensor): The target tensor.
"""
self.weight.copy_(target)
def forward(self, input):
return fn.conv2d(input, self.weight, self.bias, self.stride, self.
padding, self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
HighwayLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [gate_output], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# gate_output => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
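# Eager-mode reading of the kernel below (a sketch): it stores
# exp(x - amax(x, dim=1, keepdim=True)), the numerically stable softmax
# numerator; the normalizing division is fused into the second kernel.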
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ln/clnagoyz3cauqnx3uxib3ikp6vd4zabtsvlhtc24rayt3qg4yaom.py
# Topologically Sorted Source Nodes: [transform_output, gate_output, transformation_part, type_as, sub, carry_part, add], Original ATen: [aten.relu, aten._softmax, aten.mul, aten._to_copy, aten.sub, aten.add]
# Source node to ATen node mapping:
# add => add
# carry_part => mul_1
# gate_output => div, sum_1
# sub => sub_1
# transform_output => relu
# transformation_part => mul
# type_as => full_default
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %div), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%full_default, %div), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %primals_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
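# Eager-mode equivalent of the fusion below (a sketch, writing x = primals_3,
# t = relu(transform(x)), g = softmax(gate(x), dim=1)):
#   out = t * g + (1.0 - g) * x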
triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1 = async_compile.triton('triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x3), xmask)
tmp15 = tl.load(in_ptr2 + (x3), xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp11 * tmp8
tmp13 = 1.0
tmp14 = tmp13 - tmp8
tmp16 = tmp14 * tmp15
tmp17 = tmp12 + tmp16
tl.store(in_out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gate_output], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [transform_output, gate_output, transformation_part, type_as, sub, carry_part, add], Original ATen: [aten.relu, aten._softmax, aten.mul, aten._to_copy, aten.sub, aten.add]
triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1.run(buf4, buf2, buf0, primals_3, 256, grid=grid(256), stream=stream0)
del buf2
return (buf4, primals_3, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
class HighwayLayer(nn.Module):
def __init__(self, input_dim, transform_activation=F.relu,
gate_activation=F.softmax, gate_bias=-2):
super().__init__()
self.highway_transform_activation = transform_activation
self.highway_gate_activation = gate_activation
self.highway_transform = nn.Linear(input_dim, input_dim)
self.highway_gate = nn.Linear(input_dim, input_dim)
self.highway_gate.bias.data.fill_(gate_bias)
def forward(self, x):
transform_output = self.highway_transform_activation(self.
highway_transform(x))
gate_output = self.highway_gate_activation(self.highway_gate(x))
transformation_part = torch.mul(transform_output, gate_output)
carry_part = torch.mul(torch.FloatTensor([1.0]).type_as(gate_output
) - gate_output, x)
return torch.add(transformation_part, carry_part)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
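# Shape check (a sketch): HighwayLayer(4)(torch.rand(4, 4, 4, 4)) returns a
# tensor of shape (4, 4, 4, 4); the gating combines the transform output and
# the raw input elementwise, so the input shape is preserved.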
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + x3, xmask)
tmp15 = tl.load(in_ptr2 + x3, xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = tmp11 * tmp8
tmp13 = 1.0
tmp14 = tmp13 - tmp8
tmp16 = tmp14 * tmp15
tmp17 = tmp12 + tmp16
tl.store(in_out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = buf3
del buf3
triton_poi_fused__softmax__to_copy_add_mul_relu_sub_1[grid(256)](buf4,
buf2, buf0, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
return buf4, primals_3, buf0, buf1
class HighwayLayerNew(nn.Module):
def __init__(self, input_dim, transform_activation=F.relu,
gate_activation=F.softmax, gate_bias=-2):
super().__init__()
self.highway_transform_activation = transform_activation
self.highway_gate_activation = gate_activation
self.highway_transform = nn.Linear(input_dim, input_dim)
self.highway_gate = nn.Linear(input_dim, input_dim)
self.highway_gate.bias.data.fill_(gate_bias)
def forward(self, input_0):
primals_1 = self.highway_transform.weight
primals_2 = self.highway_transform.bias
primals_4 = self.highway_gate.weight
primals_5 = self.highway_gate.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ROCmSoftwarePlatform/translate | HighwayLayer | false | 968 | [
"BSD-3-Clause"
] | 0 | 32a6380d914ebe1a6c38c4992aac9600ed3d9810 | https://github.com/ROCmSoftwarePlatform/translate/tree/32a6380d914ebe1a6c38c4992aac9600ed3d9810 | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
class Model(nn.Module):
def __init__(self, input_dim, transform_activation=F.relu,
gate_activation=F.softmax, gate_bias=-2):
super().__init__()
self.highway_transform_activation = transform_activation
self.highway_gate_activation = gate_activation
self.highway_transform = nn.Linear(input_dim, input_dim)
self.highway_gate = nn.Linear(input_dim, input_dim)
self.highway_gate.bias.data.fill_(gate_bias)
def forward(self, x):
transform_output = self.highway_transform_activation(self.
highway_transform(x))
gate_output = self.highway_gate_activation(self.highway_gate(x))
transformation_part = torch.mul(transform_output, gate_output)
carry_part = torch.mul(torch.FloatTensor([1.0]).type_as(gate_output
) - gate_output, x)
return torch.add(transformation_part, carry_part)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
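# Sketch of the kernel below: it fuses the bias add with tanh in place, i.e.
# buf1 = tanh(x @ W_hidden.T + b_hidden) computed on the (64, 4) matmul result.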
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
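# Sketch of the kernel below: it stores exp(x - amax(x, dim=1, keepdim=True)),
# the numerically stable softmax numerator.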
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
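# Sketch of the kernel below: it finishes the softmax by dividing each
# numerator by its sum over dim=1.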
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/6v/c6vkdknlihstoo2scjcyini5rby7e5pq6gqo7jjynq75otbegpoc.py
# Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# x => sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_3), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
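# Eager-mode equivalent of the fusion below (a sketch):
#   x = torch.sum(alpha * input, dim=1)  # reduces (4, 4, 4, 4) to (4, 4, 4)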
triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, x], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_3.run(buf4, primals_3, buf5, 64, grid=grid(64), stream=stream0)
return (buf5, buf4, primals_3, buf1, buf4, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.checkpoint
class Attention(nn.Module):
def __init__(self, in_size, hidden_size):
super(Attention, self).__init__()
self.hidden = nn.Linear(in_size, hidden_size)
nn.init.orthogonal_(self.hidden.weight.data)
self.out = nn.Linear(hidden_size, in_size)
nn.init.orthogonal_(self.hidden.weight.data)
self.softmax = nn.Softmax(dim=1)
def forward(self, input):
self.alpha = self.softmax(self.out(torch.tanh(self.hidden(input))))
x = torch.sum(self.alpha * input, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_size': 4, 'hidden_size': 4}]
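# Shape check (a sketch): Attention(4, 4)(torch.rand(4, 4, 4, 4)) returns the
# attention-weighted sum over dim 1, a tensor of shape (4, 4, 4).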
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_3[grid(64)](buf4, primals_3, buf5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf5, buf4, primals_3, buf1, buf4, primals_4
class AttentionNew(nn.Module):
def __init__(self, in_size, hidden_size):
super(AttentionNew, self).__init__()
self.hidden = nn.Linear(in_size, hidden_size)
nn.init.orthogonal_(self.hidden.weight.data)
self.out = nn.Linear(hidden_size, in_size)
nn.init.orthogonal_(self.hidden.weight.data)
self.softmax = nn.Softmax(dim=1)
def forward(self, input_0):
primals_1 = self.hidden.weight
primals_2 = self.hidden.bias
primals_4 = self.out.weight
primals_5 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| MarvinLvn/platalea | Attention | false | 969 | [
"Apache-2.0"
] | 0 | 31def0813c90a3259f86f7d86cb576cd66dca3fe | https://github.com/MarvinLvn/platalea/tree/31def0813c90a3259f86f7d86cb576cd66dca3fe | import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.checkpoint
class Model(nn.Module):
def __init__(self, in_size, hidden_size):
super().__init__()
self.hidden = nn.Linear(in_size, hidden_size)
nn.init.orthogonal_(self.hidden.weight.data)
self.out = nn.Linear(hidden_size, in_size)
nn.init.orthogonal_(self.hidden.weight.data)
self.softmax = nn.Softmax(dim=1)
def forward(self, input):
self.alpha = self.softmax(self.out(torch.tanh(self.hidden(input))))
x = torch.sum(self.alpha * input, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Generator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format})
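# Sketch of the kernel below: it copies the slice x[:, 0, :] into a contiguous
# (4, 4, 4) buffer so the subsequent matmul can treat it as a (16, 4) matrix.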
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/7b/c7bf34fgn2dhohe7ejneqlees25vyq6sbe4c5lfvoehzliak2nz6.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
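# Sketch of the kernel below: it adds the projection bias in place,
# broadcasting the (4,) bias over the (4, 4, 4) matmul result.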
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Generator(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, size, vocab):
super(Generator, self).__init__()
self.size = size
self.proj = nn.Linear(self.size, vocab)
def forward(self, x):
sliced_x = x[:, 0, :]
out = self.proj(sliced_x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': 4, 'vocab': 4}]
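# Shape check (a sketch): Generator(4, 4)(torch.rand(4, 4, 4, 4)) slices out
# x[:, 0, :] and projects it, returning a tensor of shape (4, 4, 4).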
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(64)](buf2, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
class GeneratorNew(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, size, vocab):
super(GeneratorNew, self).__init__()
self.size = size
self.proj = nn.Linear(self.size, vocab)
def forward(self, input_0):
primals_2 = self.proj.weight
primals_3 = self.proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| QuLog1/QuLog | Generator | false | 970 | [
"Apache-2.0"
] | 0 | 121f3a8c6f5ee60cde771c36b9eef823a1b2597a | https://github.com/QuLog1/QuLog/tree/121f3a8c6f5ee60cde771c36b9eef823a1b2597a | import torch
import torch.nn as nn
class Model(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, size, vocab):
super().__init__()
self.size = size
self.proj = nn.Linear(self.size, vocab)
def forward(self, x):
sliced_x = x[:, 0, :]
out = self.proj(sliced_x)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
CosLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ul/cul46sk6agsata5pdplvfqwaj42gbpi26yrooch7az5rcg4qns4y.py
# Topologically Sorted Source Nodes: [setitem_2], Original ATen: [aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# setitem_2 => copy_2, full_default_3
# Graph fragment:
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_24, %full_default_3), kwargs = {})
# %select_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_5, %copy_2, 0, 2), kwargs = {})
# %select_scatter_default_7 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_4, %select_scatter_default_6, 0, 2), kwargs = {})
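# Sketch of the kernel below (its role in CosLoss is inferred, not stated):
# it writes a (4, 4, 4) block whose entries default to 1.0, scattering -1.0
# into the positions selected by the setitem chain above.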
triton_poi_fused_fill_lift_fresh_0 = async_compile.triton('triton_poi_fused_fill_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_lift_fresh_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x1 = (xindex // 4) % 4
x3 = xindex
tmp0 = x2
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = tmp5 == tmp5
tmp7 = tl.full([1], 1, tl.int32)
tmp8 = tmp1 == tmp7
tmp9 = tmp3 == tmp7
tmp10 = tmp7 == tmp5
tmp11 = tmp3 == tmp5
tmp12 = -1.0
tmp13 = 1.0
tmp14 = tl.where(tmp11, tmp12, tmp13)
tmp15 = tl.where(tmp10, tmp14, tmp13)
tmp16 = tl.where(tmp6, tmp15, tmp13)
tmp17 = tl.where(tmp9, tmp12, tmp16)
tmp18 = tmp1 == tmp5
tmp19 = tl.where(tmp18, tmp14, tmp13)
tmp20 = tl.where(tmp6, tmp19, tmp13)
tmp21 = tl.where(tmp8, tmp17, tmp20)
tmp22 = tl.where(tmp6, tmp21, tmp20)
tmp23 = tl.where(tmp4, tmp12, tmp22)
tmp24 = tmp0 == tmp7
tmp25 = tmp0 == tmp5
tmp26 = tl.where(tmp25, tmp14, tmp13)
tmp27 = tl.where(tmp6, tmp26, tmp13)
tmp28 = tl.where(tmp24, tmp17, tmp27)
tmp29 = tl.where(tmp6, tmp28, tmp27)
tmp30 = tl.where(tmp2, tmp23, tmp29)
tl.store(out_ptr0 + (x3), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/o5/co5yomkkcs2rgxeof426z4h4rrmbchimvmduqu343izlb45nqrih.py
# Topologically Sorted Source Nodes: [setitem_3], Original ATen: [aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# setitem_3 => copy_3, full_default_4
# Graph fragment:
# %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_3 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_35, %full_default_4), kwargs = {})
# %select_scatter_default_9 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_7, %copy_3, 0, 3), kwargs = {})
# %select_scatter_default_10 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_6, %select_scatter_default_9, 0, 3), kwargs = {})
triton_poi_fused_fill_lift_fresh_1 = async_compile.triton('triton_poi_fused_fill_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_lift_fresh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x1 = (xindex // 4) % 4
x3 = xindex % 16
x4 = xindex
tmp7 = tl.load(in_ptr0 + (48 + x3), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (x4), xmask)
tmp0 = x2
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = tmp5 == tmp5
tmp8 = tl.full([1], 1, tl.int32)
tmp9 = tmp1 == tmp8
tmp10 = tmp3 == tmp8
tmp11 = tmp8 == tmp5
tmp12 = tmp3 == tmp5
tmp13 = -1.0
tmp14 = 1.0
tmp15 = tl.where(tmp12, tmp13, tmp14)
tmp16 = tl.where(tmp11, tmp15, tmp14)
tmp17 = tl.where(tmp6, tmp16, tmp14)
tmp18 = tl.where(tmp10, tmp13, tmp17)
tmp19 = tmp1 == tmp5
tmp20 = tl.where(tmp19, tmp15, tmp14)
tmp21 = tl.where(tmp6, tmp20, tmp14)
tmp22 = tl.where(tmp9, tmp18, tmp21)
tmp23 = tl.where(tmp6, tmp22, tmp21)
tmp24 = tl.where(tmp6, tmp7, tmp23)
tmp25 = tl.where(tmp4, tmp13, tmp24)
tmp27 = tmp0 == tmp8
tmp28 = tmp0 == tmp5
tmp29 = tl.where(tmp28, tmp15, tmp14)
tmp30 = tl.where(tmp6, tmp29, tmp14)
tmp31 = tl.where(tmp27, tmp18, tmp30)
tmp32 = tl.where(tmp6, tmp31, tmp30)
tmp33 = tl.where(tmp6, tmp26, tmp32)
tmp34 = tl.where(tmp2, tmp25, tmp33)
tl.store(out_ptr0 + (x4), tmp34, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/un/cun6jfkm6oihpheyya22qcd7wgpe5wbpo63552kvgk4l3rprsvkw.py
# Topologically Sorted Source Nodes: [setitem_4], Original ATen: [aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# setitem_4 => copy_4, full_default_5
# Graph fragment:
# %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_4 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_46, %full_default_5), kwargs = {})
# %select_scatter_default_12 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_9, %copy_4, 0, 0), kwargs = {})
# %select_scatter_default_13 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_8, %select_scatter_default_12, 0, 0), kwargs = {})
triton_poi_fused_fill_lift_fresh_2 = async_compile.triton('triton_poi_fused_fill_lift_fresh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_lift_fresh_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x1 = (xindex // 4) % 4
x3 = xindex % 16
x4 = xindex
tmp7 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (x4), xmask)
tmp27 = tl.load(in_ptr1 + (x4), xmask)
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1], 1, tl.int32)
tmp6 = tmp5 == tmp1
tmp9 = tmp1 == tmp5
tmp10 = tmp3 == tmp5
tmp11 = tmp1 == tmp1
tmp12 = -1.0
tmp13 = 1.0
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tl.where(tmp6, tmp14, tmp13)
tmp16 = tl.where(tmp11, tmp15, tmp13)
tmp17 = tl.where(tmp10, tmp12, tmp16)
tmp18 = tl.where(tmp11, tmp14, tmp13)
tmp19 = tl.where(tmp11, tmp18, tmp13)
tmp20 = tl.where(tmp9, tmp17, tmp19)
tmp21 = tl.where(tmp6, tmp18, tmp13)
tmp22 = tl.where(tmp6, tmp20, tmp21)
tmp23 = tl.where(tmp6, tmp8, tmp22)
tmp24 = tl.where(tmp6, tmp7, tmp23)
tmp25 = tl.where(tmp4, tmp12, tmp24)
tmp28 = tmp0 == tmp5
tmp29 = tl.where(tmp2, tmp14, tmp13)
tmp30 = tl.where(tmp11, tmp29, tmp13)
tmp31 = tl.where(tmp28, tmp17, tmp30)
tmp32 = tl.where(tmp6, tmp29, tmp13)
tmp33 = tl.where(tmp6, tmp31, tmp32)
tmp34 = tl.where(tmp6, tmp27, tmp33)
tmp35 = tl.where(tmp6, tmp26, tmp34)
tmp36 = tl.where(tmp2, tmp25, tmp35)
tl.store(out_ptr0 + (x4), tmp36, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/7y/c7ypywjg537wswju5w3bur534dxsyi6423wwgtvwd2c5f5sm6epb.py
# Topologically Sorted Source Nodes: [setitem_5], Original ATen: [aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# setitem_5 => copy_5, full_default_6
# Graph fragment:
# %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_5 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_57, %full_default_6), kwargs = {})
# %select_scatter_default_15 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_11, %copy_5, 0, 1), kwargs = {})
# %select_scatter_default_16 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_10, %select_scatter_default_15, 0, 1), kwargs = {})
triton_poi_fused_fill_lift_fresh_3 = async_compile.triton('triton_poi_fused_fill_lift_fresh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_lift_fresh_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x1 = (xindex // 4) % 4
x3 = xindex % 16
x4 = xindex
tmp6 = tl.load(in_ptr0 + (16 + x3), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (16 + x3), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (16 + x3), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (x4), xmask)
tmp27 = tl.load(in_ptr1 + (x4), xmask)
tmp28 = tl.load(in_ptr2 + (x4), xmask)
tmp0 = x2
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tmp1 == tmp1
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = tmp1 == tmp7
tmp11 = tmp7 == tmp7
tmp12 = tmp3 == tmp7
tmp13 = -1.0
tmp14 = 1.0
tmp15 = tl.where(tmp12, tmp13, tmp14)
tmp16 = tl.where(tmp8, tmp15, tmp14)
tmp17 = tl.where(tmp11, tmp16, tmp14)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tmp19 = tl.where(tmp5, tmp18, tmp17)
tmp20 = tl.where(tmp8, tmp16, tmp14)
tmp21 = tl.where(tmp8, tmp19, tmp20)
tmp22 = tl.where(tmp8, tmp10, tmp21)
tmp23 = tl.where(tmp8, tmp9, tmp22)
tmp24 = tl.where(tmp5, tmp6, tmp23)
tmp25 = tl.where(tmp4, tmp13, tmp24)
tmp29 = tmp0 == tmp7
tmp30 = tl.where(tmp29, tmp15, tmp14)
tmp31 = tl.where(tmp11, tmp30, tmp14)
tmp32 = tl.where(tmp2, tmp18, tmp31)
tmp33 = tl.where(tmp8, tmp30, tmp14)
tmp34 = tl.where(tmp8, tmp32, tmp33)
tmp35 = tl.where(tmp8, tmp28, tmp34)
tmp36 = tl.where(tmp8, tmp27, tmp35)
tmp37 = tl.where(tmp5, tmp26, tmp36)
tmp38 = tl.where(tmp2, tmp25, tmp37)
tl.store(out_ptr0 + (x4), tmp38, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/vo/cvoqdjgdsbesevfttlp2kzfiy42njlm6joc6vme4keek4dnkvbp5.py
# Topologically Sorted Source Nodes: [mask, setitem, setitem_1], Original ATen: [aten.ones_like, aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# mask => full_default
# setitem => copy, full_default_1
# setitem_1 => copy_1, full_default_2
# Graph fragment:
# %full_default : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_2, %full_default_1), kwargs = {})
# %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_1, %copy, 0, 0), kwargs = {})
# %select_scatter_default_1 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int, %select_scatter_default, 0, 0), kwargs = {})
# %select_scatter_default_2 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%full_default, %select_scatter_default_1, 0, 0), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_13, %full_default_2), kwargs = {})
# %select_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_3, %copy_1, 0, 1), kwargs = {})
# %select_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_2, %select_scatter_default_3, 0, 1), kwargs = {})
# %select_scatter_default_5 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_2, %select_scatter_default_4, 0, 0), kwargs = {})
# %select_scatter_default_8 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_5, %select_scatter_default_7, 0, 0), kwargs = {})
# %select_scatter_default_11 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_8, %select_scatter_default_10, 0, 0), kwargs = {})
# %select_scatter_default_14 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_11, %select_scatter_default_13, 0, 1), kwargs = {})
# %select_scatter_default_17 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_14, %select_scatter_default_16, 0, 1), kwargs = {})
triton_poi_fused_fill_lift_fresh_ones_like_4 = async_compile.triton('triton_poi_fused_fill_lift_fresh_ones_like_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_ones_like_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_lift_fresh_ones_like_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 64)
x4 = xindex % 64
x2 = (xindex // 16) % 4
x1 = (xindex // 4) % 4
x5 = xindex
tmp3 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp0 = x3
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = tmp0 == tmp5
tmp9 = x2
tmp10 = tmp9 == tmp1
tmp11 = x1
tmp12 = tmp11 == tmp1
tmp13 = tmp5 == tmp5
tmp14 = tmp1 == tmp5
tmp15 = tmp11 == tmp5
tmp16 = -1.0
tmp17 = 1.0
tmp18 = tl.where(tmp15, tmp16, tmp17)
tmp19 = tl.where(tmp14, tmp18, tmp17)
tmp20 = tl.where(tmp13, tmp19, tmp17)
tmp21 = tl.where(tmp12, tmp16, tmp20)
tmp22 = tmp9 == tmp5
tmp23 = tl.where(tmp22, tmp18, tmp17)
tmp24 = tl.where(tmp13, tmp23, tmp17)
tmp25 = tl.where(tmp10, tmp21, tmp24)
tmp26 = tl.where(tmp6, tmp23, tmp17)
tmp27 = tl.where(tmp6, tmp25, tmp26)
tmp28 = tl.where(tmp6, tmp8, tmp27)
tmp29 = tl.where(tmp6, tmp7, tmp28)
tmp30 = tl.where(tmp2, tmp4, tmp29)
tmp31 = tl.where(tmp2, tmp3, tmp30)
tl.store(out_ptr0 + (x5), tmp31, xmask)
''', device_str='cuda')
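# Note: the kernel above is the first to materialize the full (4, 4, 4, 4)
# mask (aten.ones_like) while folding in the already-scattered batch rows 0
# and 1 produced by the smaller 64-element kernels before it.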
# kernel path: runs/run_shard_6/inductor_cache/cl/ccl6wlbovhv6lo73iy7pmrbir4ddxwsby7f67ojxshoiu4rrbi22.py
# Topologically Sorted Source Nodes: [setitem_8], Original ATen: [aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# setitem_8 => copy_8, full_default_9
# Graph fragment:
# %full_default_9 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_8 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_90, %full_default_9), kwargs = {})
# %select_scatter_default_24 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_17, %copy_8, 0, 0), kwargs = {})
# %select_scatter_default_25 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_16, %select_scatter_default_24, 0, 0), kwargs = {})
triton_poi_fused_fill_lift_fresh_5 = async_compile.triton('triton_poi_fused_fill_lift_fresh_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_lift_fresh_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x1 = (xindex // 4) % 4
x3 = xindex % 16
x4 = xindex
tmp14 = tl.load(in_ptr0 + (96 + x3), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (112 + x3), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (64 + x4), xmask)
tmp36 = tl.load(in_ptr0 + (128 + x4), xmask)
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1], 2, tl.int32)
tmp6 = tl.full([1], 1, tl.int32)
tmp7 = tmp5 == tmp6
tmp8 = tl.full([1], 3, tl.int32)
tmp9 = tmp1 == tmp8
tmp10 = tmp3 == tmp8
tmp11 = tmp6 == tmp6
tmp12 = tmp8 == tmp5
tmp13 = tmp3 == tmp5
tmp15 = -1.0
tmp16 = tl.where(tmp13, tmp15, tmp14)
tmp18 = tl.where(tmp12, tmp16, tmp17)
tmp19 = tl.where(tmp11, tmp18, tmp17)
tmp20 = tl.where(tmp10, tmp15, tmp19)
tmp21 = tmp1 == tmp5
tmp23 = tl.where(tmp21, tmp16, tmp22)
tmp24 = tl.where(tmp11, tmp23, tmp22)
tmp25 = tl.where(tmp9, tmp20, tmp24)
tmp27 = tl.where(tmp7, tmp23, tmp26)
tmp28 = tl.where(tmp7, tmp25, tmp27)
tmp29 = tl.where(tmp4, tmp15, tmp28)
tmp30 = tmp0 == tmp8
tmp31 = tmp0 == tmp5
tmp33 = tl.where(tmp31, tmp16, tmp32)
tmp34 = tl.where(tmp11, tmp33, tmp32)
tmp35 = tl.where(tmp30, tmp20, tmp34)
tmp37 = tl.where(tmp7, tmp33, tmp36)
tmp38 = tl.where(tmp7, tmp35, tmp37)
tmp39 = tl.where(tmp2, tmp29, tmp38)
tl.store(out_ptr0 + (x4), tmp39, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/g6/cg6cd2lwpictv7xe7vbpr7ji3nbp4c73svhk6jzlqlqwyshni6ra.py
# Topologically Sorted Source Nodes: [setitem_6, setitem_7], Original ATen: [aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# setitem_6 => copy_6, full_default_7
# setitem_7 => copy_7, full_default_8
# Graph fragment:
# %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_6 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_68, %full_default_7), kwargs = {})
# %select_scatter_default_18 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_13, %copy_6, 0, 2), kwargs = {})
# %select_scatter_default_19 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_12, %select_scatter_default_18, 0, 2), kwargs = {})
# %select_scatter_default_20 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_17, %select_scatter_default_19, 0, 1), kwargs = {})
# %full_default_8 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_7 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_79, %full_default_8), kwargs = {})
# %select_scatter_default_21 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_15, %copy_7, 0, 3), kwargs = {})
# %select_scatter_default_22 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_14, %select_scatter_default_21, 0, 3), kwargs = {})
# %select_scatter_default_23 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_20, %select_scatter_default_22, 0, 1), kwargs = {})
# %select_scatter_default_26 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_23, %select_scatter_default_25, 0, 2), kwargs = {})
triton_poi_fused_fill_lift_fresh_6 = async_compile.triton('triton_poi_fused_fill_lift_fresh_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_lift_fresh_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 64)
x4 = xindex % 64
x2 = (xindex // 16) % 4
x1 = (xindex // 4) % 4
x6 = xindex % 16
x7 = xindex
tmp3 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (96 + x6), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (112 + x6), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (64 + x4), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (x7), xmask)
tmp0 = x3
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 1, tl.int32)
tmp5 = tmp0 == tmp4
tmp6 = x2
tmp7 = tl.full([1], 3, tl.int32)
tmp8 = tmp6 == tmp7
tmp9 = x1
tmp10 = tmp9 == tmp7
tmp11 = tmp4 == tmp4
tmp12 = tmp7 == tmp1
tmp13 = tmp9 == tmp1
tmp15 = -1.0
tmp16 = tl.where(tmp13, tmp15, tmp14)
tmp18 = tl.where(tmp12, tmp16, tmp17)
tmp19 = tl.where(tmp11, tmp18, tmp17)
tmp20 = tl.where(tmp10, tmp15, tmp19)
tmp21 = tmp6 == tmp1
tmp23 = tl.where(tmp21, tmp16, tmp22)
tmp24 = tl.where(tmp11, tmp23, tmp22)
tmp25 = tl.where(tmp8, tmp20, tmp24)
tmp27 = tl.where(tmp5, tmp23, tmp26)
tmp28 = tl.where(tmp5, tmp25, tmp27)
tmp29 = tl.where(tmp2, tmp3, tmp28)
tl.store(out_ptr0 + (x7), tmp29, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/3c/c3coocxjam5lvcqaujgmyixhlezs7tkgkncieupaji7qgfpyhzfn.py
# Topologically Sorted Source Nodes: [setitem_11], Original ATen: [aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# setitem_11 => copy_11, full_default_12
# Graph fragment:
# %full_default_12 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_11 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_123, %full_default_12), kwargs = {})
# %select_scatter_default_33 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_23, %copy_11, 0, 3), kwargs = {})
# %select_scatter_default_34 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_22, %select_scatter_default_33, 0, 3), kwargs = {})
triton_poi_fused_fill_lift_fresh_7 = async_compile.triton('triton_poi_fused_fill_lift_fresh_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_lift_fresh_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x1 = (xindex // 4) % 4
x3 = xindex % 16
x4 = xindex
tmp12 = tl.load(in_ptr0 + (144 + x3), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (160 + x3), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (176 + x3), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (128 + x4), xmask)
tmp0 = x2
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1], 2, tl.int32)
tmp6 = tmp5 == tmp5
tmp7 = tmp1 == tmp5
tmp8 = tmp3 == tmp5
tmp9 = tl.full([1], 1, tl.int32)
tmp10 = tmp5 == tmp9
tmp11 = tmp3 == tmp9
tmp13 = -1.0
tmp14 = tl.where(tmp11, tmp13, tmp12)
tmp16 = tl.where(tmp10, tmp14, tmp15)
tmp17 = tl.where(tmp6, tmp16, tmp15)
tmp18 = tl.where(tmp8, tmp13, tmp17)
tmp19 = tmp1 == tmp9
tmp21 = tl.where(tmp19, tmp14, tmp20)
tmp22 = tl.where(tmp6, tmp21, tmp20)
tmp23 = tl.where(tmp7, tmp18, tmp22)
tmp24 = tl.where(tmp6, tmp23, tmp22)
tmp25 = tl.where(tmp4, tmp13, tmp24)
tmp26 = tmp0 == tmp5
tmp27 = tmp0 == tmp9
tmp29 = tl.where(tmp27, tmp14, tmp28)
tmp30 = tl.where(tmp6, tmp29, tmp28)
tmp31 = tl.where(tmp26, tmp18, tmp30)
tmp32 = tl.where(tmp6, tmp31, tmp30)
tmp33 = tl.where(tmp2, tmp25, tmp32)
tl.store(out_ptr0 + (x4), tmp33, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/5k/c5kg5y7siwe6bnuycvfamfzclact7gqepiigsayadbaftktu22b3.py
# Topologically Sorted Source Nodes: [setitem_9, setitem_10], Original ATen: [aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# setitem_10 => copy_10, full_default_11
# setitem_9 => copy_9, full_default_10
# Graph fragment:
# %full_default_10 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_9 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_101, %full_default_10), kwargs = {})
# %select_scatter_default_27 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_19, %copy_9, 0, 1), kwargs = {})
# %select_scatter_default_28 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_18, %select_scatter_default_27, 0, 1), kwargs = {})
# %select_scatter_default_29 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_26, %select_scatter_default_28, 0, 2), kwargs = {})
# %full_default_11 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_10 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_112, %full_default_11), kwargs = {})
# %select_scatter_default_30 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_21, %copy_10, 0, 2), kwargs = {})
# %select_scatter_default_31 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_20, %select_scatter_default_30, 0, 2), kwargs = {})
# %select_scatter_default_32 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_29, %select_scatter_default_31, 0, 2), kwargs = {})
# %select_scatter_default_35 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_32, %select_scatter_default_34, 0, 2), kwargs = {})
triton_poi_fused_fill_lift_fresh_8 = async_compile.triton('triton_poi_fused_fill_lift_fresh_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_lift_fresh_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 64)
x4 = xindex % 64
x2 = (xindex // 16) % 4
x1 = (xindex // 4) % 4
x6 = xindex % 16
x7 = xindex
tmp3 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (144 + x6), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (160 + x6), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (128 + x4), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + (x7), xmask)
tmp0 = x3
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = x2
tmp5 = tmp4 == tmp1
tmp6 = x1
tmp7 = tmp6 == tmp1
tmp8 = tmp1 == tmp1
tmp9 = tl.full([1], 1, tl.int32)
tmp10 = tmp1 == tmp9
tmp11 = tmp6 == tmp9
tmp13 = -1.0
tmp14 = tl.where(tmp11, tmp13, tmp12)
tmp16 = tl.where(tmp10, tmp14, tmp15)
tmp17 = tl.where(tmp8, tmp16, tmp15)
tmp18 = tl.where(tmp7, tmp13, tmp17)
tmp19 = tmp4 == tmp9
tmp21 = tl.where(tmp19, tmp14, tmp20)
tmp22 = tl.where(tmp8, tmp21, tmp20)
tmp23 = tl.where(tmp5, tmp18, tmp22)
tmp25 = tl.where(tmp2, tmp21, tmp24)
tmp26 = tl.where(tmp2, tmp23, tmp25)
tmp27 = tl.where(tmp2, tmp3, tmp26)
tl.store(out_ptr0 + (x7), tmp27, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/d7/cd7q7w4xgz7bt2px4m3m7lgjhgjekwyzonegxewzaloezea3aedd.py
# Topologically Sorted Source Nodes: [setitem_14], Original ATen: [aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# setitem_14 => copy_14, full_default_15
# Graph fragment:
# %full_default_15 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_14 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_156, %full_default_15), kwargs = {})
# %select_scatter_default_42 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_29, %copy_14, 0, 2), kwargs = {})
# %select_scatter_default_43 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_28, %select_scatter_default_42, 0, 2), kwargs = {})
triton_poi_fused_fill_lift_fresh_9 = async_compile.triton('triton_poi_fused_fill_lift_fresh_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_lift_fresh_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x1 = (xindex // 4) % 4
x3 = xindex % 16
x4 = xindex
tmp13 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (208 + x3), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (224 + x3), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (192 + x4), xmask)
tmp0 = x2
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1], 3, tl.int32)
tmp6 = tmp5 == tmp5
tmp7 = tl.full([1], 1, tl.int32)
tmp8 = tmp1 == tmp7
tmp9 = tmp3 == tmp7
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = tmp7 == tmp10
tmp12 = tmp3 == tmp10
tmp14 = -1.0
tmp15 = tl.where(tmp12, tmp14, tmp13)
tmp17 = tl.where(tmp11, tmp15, tmp16)
tmp18 = tl.where(tmp6, tmp17, tmp16)
tmp19 = tl.where(tmp9, tmp14, tmp18)
tmp20 = tmp1 == tmp10
tmp22 = tl.where(tmp20, tmp15, tmp21)
tmp23 = tl.where(tmp6, tmp22, tmp21)
tmp24 = tl.where(tmp8, tmp19, tmp23)
tmp25 = tl.where(tmp6, tmp24, tmp23)
tmp26 = tl.where(tmp4, tmp14, tmp25)
tmp27 = tmp0 == tmp7
tmp28 = tmp0 == tmp10
tmp30 = tl.where(tmp28, tmp15, tmp29)
tmp31 = tl.where(tmp6, tmp30, tmp29)
tmp32 = tl.where(tmp27, tmp19, tmp31)
tmp33 = tl.where(tmp6, tmp32, tmp31)
tmp34 = tl.where(tmp2, tmp26, tmp33)
tl.store(out_ptr0 + (x4), tmp34, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/cs/ccsfwyb4wxftjo6rtcp6az7e3vto53qvyyrihtpjd2tyyrglkdn2.py
# Topologically Sorted Source Nodes: [setitem_12, setitem_13], Original ATen: [aten.lift_fresh, aten.fill]
# Source node to ATen node mapping:
# setitem_12 => copy_12, full_default_13
# setitem_13 => copy_13, full_default_14
# Graph fragment:
# %full_default_13 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_12 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_134, %full_default_13), kwargs = {})
# %select_scatter_default_36 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_25, %copy_12, 0, 0), kwargs = {})
# %select_scatter_default_37 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_24, %select_scatter_default_36, 0, 0), kwargs = {})
# %select_scatter_default_38 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_35, %select_scatter_default_37, 0, 3), kwargs = {})
# %full_default_14 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_13 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_145, %full_default_14), kwargs = {})
# %select_scatter_default_39 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_27, %copy_13, 0, 1), kwargs = {})
# %select_scatter_default_40 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_26, %select_scatter_default_39, 0, 1), kwargs = {})
# %select_scatter_default_41 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_38, %select_scatter_default_40, 0, 3), kwargs = {})
# %select_scatter_default_44 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_41, %select_scatter_default_43, 0, 3), kwargs = {})
triton_poi_fused_fill_lift_fresh_10 = async_compile.triton('triton_poi_fused_fill_lift_fresh_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_lift_fresh_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 64)
x4 = xindex % 64
x2 = (xindex // 16) % 4
x1 = (xindex // 4) % 4
x6 = xindex % 16
x7 = xindex
tmp3 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (192 + x6), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (208 + x6), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (192 + x4), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + (x7), xmask)
tmp0 = x3
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = x2
tmp5 = tl.full([1], 1, tl.int32)
tmp6 = tmp4 == tmp5
tmp7 = x1
tmp8 = tmp7 == tmp5
tmp9 = tmp1 == tmp1
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = tmp5 == tmp10
tmp12 = tmp7 == tmp10
tmp14 = -1.0
tmp15 = tl.where(tmp12, tmp14, tmp13)
tmp17 = tl.where(tmp11, tmp15, tmp16)
tmp18 = tl.where(tmp9, tmp17, tmp16)
tmp19 = tl.where(tmp8, tmp14, tmp18)
tmp20 = tmp4 == tmp10
tmp22 = tl.where(tmp20, tmp15, tmp21)
tmp23 = tl.where(tmp9, tmp22, tmp21)
tmp24 = tl.where(tmp6, tmp19, tmp23)
tmp26 = tl.where(tmp2, tmp22, tmp25)
tmp27 = tl.where(tmp2, tmp24, tmp26)
tmp28 = tl.where(tmp2, tmp3, tmp27)
tl.store(out_ptr0 + (x7), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/cg/ccgyn3k3m35wjvniirc3omuaar7huvsx544rjsssb3bubtibzkfu.py
# Topologically Sorted Source Nodes: [setitem_15, nw, max_1, sum_1], Original ATen: [aten.lift_fresh, aten.fill, aten.mul, aten.max, aten.sum]
# Source node to ATen node mapping:
# max_1 => max_1
# nw => mul
# setitem_15 => copy_15, full_default_16
# sum_1 => sum_1
# Graph fragment:
# %full_default_16 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %copy_15 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_167, %full_default_16), kwargs = {})
# %select_scatter_default_45 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_31, %copy_15, 0, 3), kwargs = {})
# %select_scatter_default_46 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_30, %select_scatter_default_45, 0, 3), kwargs = {})
# %select_scatter_default_47 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_44, %select_scatter_default_46, 0, 3), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_scatter_default_47, %arg0_1), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, 1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_fill_lift_fresh_max_mul_sum_11 = async_compile.triton('triton_poi_fused_fill_lift_fresh_max_mul_sum_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fill_lift_fresh_max_mul_sum_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_fill_lift_fresh_max_mul_sum_11(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x1 = (xindex // 4) % 4
x3 = xindex % 16
x4 = xindex
tmp7 = tl.load(in_ptr0 + (240 + x3), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask)
tmp14 = tl.load(in_ptr1 + (x3 + (64*x2)), xmask)
tmp18 = tl.load(in_ptr0 + (208 + x3), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask)
tmp22 = tl.load(in_ptr1 + (16 + x3 + (64*x2)), xmask)
tmp27 = tl.load(in_ptr0 + (224 + x3), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask)
tmp31 = tl.load(in_ptr1 + (32 + x3 + (64*x2)), xmask)
tmp36 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask)
tmp38 = tl.load(in_ptr1 + (48 + x3 + (64*x2)), xmask)
tmp0 = x2
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = tmp3 == tmp1
tmp5 = x1
tmp6 = tmp5 == tmp1
tmp8 = -1.0
tmp9 = tl.where(tmp6, tmp8, tmp7)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp13 = tl.where(tmp2, tmp11, tmp12)
tmp15 = tmp13 * tmp14
tmp16 = tl.full([1], 1, tl.int32)
tmp17 = tmp16 == tmp1
tmp19 = tl.where(tmp17, tmp9, tmp18)
tmp21 = tl.where(tmp2, tmp19, tmp20)
tmp23 = tmp21 * tmp22
tmp24 = triton_helpers.maximum(tmp15, tmp23)
tmp25 = tl.full([1], 2, tl.int32)
tmp26 = tmp25 == tmp1
tmp28 = tl.where(tmp26, tmp9, tmp27)
tmp30 = tl.where(tmp2, tmp28, tmp29)
tmp32 = tmp30 * tmp31
tmp33 = triton_helpers.maximum(tmp24, tmp32)
tmp34 = tmp1 == tmp1
tmp35 = tl.where(tmp34, tmp9, tmp7)
tmp37 = tl.where(tmp2, tmp35, tmp36)
tmp39 = tmp37 * tmp38
tmp40 = triton_helpers.maximum(tmp33, tmp39)
tmp41 = tmp15 + tmp23
tmp42 = tmp41 + tmp32
tmp43 = tmp42 + tmp39
tl.store(out_ptr0 + (x4), tmp40, xmask)
tl.store(out_ptr1 + (x4), tmp43, xmask)
''', device_str='cuda')
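# Illustrative eager-mode sketch of the fused kernel above (helper name is
# ours; not invoked by the generated code): in one pass it forms nw = mask * w
# and reduces along dim 1, writing the max to out_ptr0 and the sum to out_ptr1.
def _reference_nw_max_sum_sketch(mask, w):
    nw = mask * w                      # (4, 4, 4, 4)
    row_max, _ = torch.max(nw, dim=1)  # corresponds to out_ptr0 (buf11)
    row_sum = torch.sum(nw, dim=1)     # corresponds to out_ptr1 (buf12)
    return row_max, row_sum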
# kernel path: runs/run_shard_6/inductor_cache/o6/co6tytd2ppuu4uaopq5duefn5z2l2da67l3t6goia5yvqiuj7gqi.py
# Topologically Sorted Source Nodes: [max_2, sum_2, mul_1, tmp_all, loss], Original ATen: [aten.max, aten.sum, aten.mul, aten.add, aten.mean]
# Source node to ATen node mapping:
# loss => mean
# max_2 => max_2
# mul_1 => mul_1
# sum_2 => sum_2
# tmp_all => add
# Graph fragment:
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%getitem, 1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_1, [1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 6e-07), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, %mul_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add,), kwargs = {})
triton_per_fused_add_max_mean_mul_sum_12 = async_compile.triton('triton_per_fused_add_max_mean_mul_sum_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_max_mean_mul_sum_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_max_mean_mul_sum_12(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r0 + (16*r1)), None)
tmp1 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), None)
tmp3 = tl.load(in_ptr0 + (8 + r0 + (16*r1)), None)
tmp5 = tl.load(in_ptr0 + (12 + r0 + (16*r1)), None)
tmp7 = tl.load(in_ptr1 + (r0 + (16*r1)), None)
tmp8 = tl.load(in_ptr1 + (4 + r0 + (16*r1)), None)
tmp10 = tl.load(in_ptr1 + (8 + r0 + (16*r1)), None)
tmp12 = tl.load(in_ptr1 + (12 + r0 + (16*r1)), None)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp14 = 6e-07
tmp15 = tmp13 * tmp14
tmp16 = tmp6 + tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.sum(tmp17, 1)[:, None]
tmp20 = 16.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
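# Illustrative eager-mode sketch of the final persistent reduction (helper
# name is ours; not invoked by the generated code): it reduces the per-row
# max/sum buffers over the next dim, scales the sum by the 6e-07 factor, and
# averages the result down to a scalar loss.
def _reference_loss_sketch(row_max, row_sum, factor=6e-07):
    tmp, _ = torch.max(row_max, dim=1)  # (4, 4)
    s = torch.sum(row_sum, dim=1)       # (4, 4)
    return torch.mean(tmp + factor * s)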
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [setitem_2], Original ATen: [aten.lift_fresh, aten.fill]
stream0 = get_raw_stream(0)
triton_poi_fused_fill_lift_fresh_0.run(buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [setitem_3], Original ATen: [aten.lift_fresh, aten.fill]
triton_poi_fused_fill_lift_fresh_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [setitem_4], Original ATen: [aten.lift_fresh, aten.fill]
triton_poi_fused_fill_lift_fresh_2.run(buf1, buf0, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [setitem_5], Original ATen: [aten.lift_fresh, aten.fill]
triton_poi_fused_fill_lift_fresh_3.run(buf2, buf1, buf0, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mask, setitem, setitem_1], Original ATen: [aten.ones_like, aten.lift_fresh, aten.fill]
triton_poi_fused_fill_lift_fresh_ones_like_4.run(buf3, buf2, buf1, buf0, buf4, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [setitem_8], Original ATen: [aten.lift_fresh, aten.fill]
triton_poi_fused_fill_lift_fresh_5.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [setitem_6, setitem_7], Original ATen: [aten.lift_fresh, aten.fill]
triton_poi_fused_fill_lift_fresh_6.run(buf5, buf4, buf6, 256, grid=grid(256), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [setitem_11], Original ATen: [aten.lift_fresh, aten.fill]
triton_poi_fused_fill_lift_fresh_7.run(buf6, buf7, 64, grid=grid(64), stream=stream0)
buf8 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [setitem_9, setitem_10], Original ATen: [aten.lift_fresh, aten.fill]
triton_poi_fused_fill_lift_fresh_8.run(buf7, buf6, buf8, 256, grid=grid(256), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [setitem_14], Original ATen: [aten.lift_fresh, aten.fill]
triton_poi_fused_fill_lift_fresh_9.run(buf8, buf9, 64, grid=grid(64), stream=stream0)
buf10 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [setitem_12, setitem_13], Original ATen: [aten.lift_fresh, aten.fill]
triton_poi_fused_fill_lift_fresh_10.run(buf9, buf8, buf10, 256, grid=grid(256), stream=stream0)
del buf8
buf11 = buf9; del buf9 # reuse
buf12 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [setitem_15, nw, max_1, sum_1], Original ATen: [aten.lift_fresh, aten.fill, aten.mul, aten.max, aten.sum]
triton_poi_fused_fill_lift_fresh_max_mul_sum_11.run(buf10, arg0_1, buf11, buf12, 64, grid=grid(64), stream=stream0)
del arg0_1
del buf10
buf13 = empty_strided_cuda((), (), torch.float32)
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [max_2, sum_2, mul_1, tmp_all, loss], Original ATen: [aten.max, aten.sum, aten.mul, aten.add, aten.mean]
triton_per_fused_add_max_mean_mul_sum_12.run(buf14, buf11, buf12, 1, 16, grid=grid(1), stream=stream0)
del buf11
del buf12
return (buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
class CosLoss(nn.Module):
def __init__(self, factor=6e-07, havesum=True, havemax=True):
super(CosLoss, self).__init__()
self.factor = factor
self.havesum = havesum
self.havemax = havemax
def forward(self, w):
mask = torch.ones_like(w)
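        # Flip the sign of the (j, j) diagonal of dims 1 and 2 for every
        # batch element, leaving all other entries at +1.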
for i in range(mask.shape[0]):
for j in range(mask.shape[1]):
mask[i, j, j] = -1
nw = mask * w
tmp, _ = torch.max(nw, dim=1)
tmp, _ = torch.max(tmp, dim=1)
if self.havesum and self.havemax:
            tmp_all = tmp + self.factor * torch.sum(torch.sum(nw, dim=1), dim=1)
elif self.havesum:
tmp_all = self.factor * torch.sum(torch.sum(nw, dim=1), dim=1)
else:
tmp_all = tmp
loss = torch.mean(tmp_all)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
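# A vectorized sketch added for illustration (build_diag_mask is a
# hypothetical helper, not part of the original module): the double loop in
# forward() can be replaced by one advanced-indexing assignment, assuming w
# has shape [B, N, N, ...] with square middle dimensions.
def build_diag_mask(w):
    mask = torch.ones_like(w)
    idx = torch.arange(w.shape[1], device=w.device)
    mask[:, idx, idx] = -1.0
    return mask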
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_fill_lift_fresh_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x1 = xindex // 4 % 4
x3 = xindex
tmp0 = x2
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = tmp5 == tmp5
tmp7 = tl.full([1], 1, tl.int32)
tmp8 = tmp1 == tmp7
tmp9 = tmp3 == tmp7
tmp10 = tmp7 == tmp5
tmp11 = tmp3 == tmp5
tmp12 = -1.0
tmp13 = 1.0
tmp14 = tl.where(tmp11, tmp12, tmp13)
tmp15 = tl.where(tmp10, tmp14, tmp13)
tmp16 = tl.where(tmp6, tmp15, tmp13)
tmp17 = tl.where(tmp9, tmp12, tmp16)
tmp18 = tmp1 == tmp5
tmp19 = tl.where(tmp18, tmp14, tmp13)
tmp20 = tl.where(tmp6, tmp19, tmp13)
tmp21 = tl.where(tmp8, tmp17, tmp20)
tmp22 = tl.where(tmp6, tmp21, tmp20)
tmp23 = tl.where(tmp4, tmp12, tmp22)
tmp24 = tmp0 == tmp7
tmp25 = tmp0 == tmp5
tmp26 = tl.where(tmp25, tmp14, tmp13)
tmp27 = tl.where(tmp6, tmp26, tmp13)
tmp28 = tl.where(tmp24, tmp17, tmp27)
tmp29 = tl.where(tmp6, tmp28, tmp27)
tmp30 = tl.where(tmp2, tmp23, tmp29)
tl.store(out_ptr0 + x3, tmp30, xmask)
@triton.jit
def triton_poi_fused_fill_lift_fresh_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x1 = xindex // 4 % 4
x3 = xindex % 16
x4 = xindex
tmp7 = tl.load(in_ptr0 + (48 + x3), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + x4, xmask)
tmp0 = x2
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = tmp5 == tmp5
tmp8 = tl.full([1], 1, tl.int32)
tmp9 = tmp1 == tmp8
tmp10 = tmp3 == tmp8
tmp11 = tmp8 == tmp5
tmp12 = tmp3 == tmp5
tmp13 = -1.0
tmp14 = 1.0
tmp15 = tl.where(tmp12, tmp13, tmp14)
tmp16 = tl.where(tmp11, tmp15, tmp14)
tmp17 = tl.where(tmp6, tmp16, tmp14)
tmp18 = tl.where(tmp10, tmp13, tmp17)
tmp19 = tmp1 == tmp5
tmp20 = tl.where(tmp19, tmp15, tmp14)
tmp21 = tl.where(tmp6, tmp20, tmp14)
tmp22 = tl.where(tmp9, tmp18, tmp21)
tmp23 = tl.where(tmp6, tmp22, tmp21)
tmp24 = tl.where(tmp6, tmp7, tmp23)
tmp25 = tl.where(tmp4, tmp13, tmp24)
tmp27 = tmp0 == tmp8
tmp28 = tmp0 == tmp5
tmp29 = tl.where(tmp28, tmp15, tmp14)
tmp30 = tl.where(tmp6, tmp29, tmp14)
tmp31 = tl.where(tmp27, tmp18, tmp30)
tmp32 = tl.where(tmp6, tmp31, tmp30)
tmp33 = tl.where(tmp6, tmp26, tmp32)
tmp34 = tl.where(tmp2, tmp25, tmp33)
tl.store(out_ptr0 + x4, tmp34, xmask)
@triton.jit
def triton_poi_fused_fill_lift_fresh_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x1 = xindex // 4 % 4
x3 = xindex % 16
x4 = xindex
tmp7 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + x4, xmask)
tmp27 = tl.load(in_ptr1 + x4, xmask)
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1], 1, tl.int32)
tmp6 = tmp5 == tmp1
tmp9 = tmp1 == tmp5
tmp10 = tmp3 == tmp5
tmp11 = tmp1 == tmp1
tmp12 = -1.0
tmp13 = 1.0
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tl.where(tmp6, tmp14, tmp13)
tmp16 = tl.where(tmp11, tmp15, tmp13)
tmp17 = tl.where(tmp10, tmp12, tmp16)
tmp18 = tl.where(tmp11, tmp14, tmp13)
tmp19 = tl.where(tmp11, tmp18, tmp13)
tmp20 = tl.where(tmp9, tmp17, tmp19)
tmp21 = tl.where(tmp6, tmp18, tmp13)
tmp22 = tl.where(tmp6, tmp20, tmp21)
tmp23 = tl.where(tmp6, tmp8, tmp22)
tmp24 = tl.where(tmp6, tmp7, tmp23)
tmp25 = tl.where(tmp4, tmp12, tmp24)
tmp28 = tmp0 == tmp5
tmp29 = tl.where(tmp2, tmp14, tmp13)
tmp30 = tl.where(tmp11, tmp29, tmp13)
tmp31 = tl.where(tmp28, tmp17, tmp30)
tmp32 = tl.where(tmp6, tmp29, tmp13)
tmp33 = tl.where(tmp6, tmp31, tmp32)
tmp34 = tl.where(tmp6, tmp27, tmp33)
tmp35 = tl.where(tmp6, tmp26, tmp34)
tmp36 = tl.where(tmp2, tmp25, tmp35)
tl.store(out_ptr0 + x4, tmp36, xmask)
@triton.jit
def triton_poi_fused_fill_lift_fresh_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x1 = xindex // 4 % 4
x3 = xindex % 16
x4 = xindex
tmp6 = tl.load(in_ptr0 + (16 + x3), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (16 + x3), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (16 + x3), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + x4, xmask)
tmp27 = tl.load(in_ptr1 + x4, xmask)
tmp28 = tl.load(in_ptr2 + x4, xmask)
tmp0 = x2
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tmp1 == tmp1
tmp7 = tl.full([1], 0, tl.int32)
tmp8 = tmp1 == tmp7
tmp11 = tmp7 == tmp7
tmp12 = tmp3 == tmp7
tmp13 = -1.0
tmp14 = 1.0
tmp15 = tl.where(tmp12, tmp13, tmp14)
tmp16 = tl.where(tmp8, tmp15, tmp14)
tmp17 = tl.where(tmp11, tmp16, tmp14)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tmp19 = tl.where(tmp5, tmp18, tmp17)
tmp20 = tl.where(tmp8, tmp16, tmp14)
tmp21 = tl.where(tmp8, tmp19, tmp20)
tmp22 = tl.where(tmp8, tmp10, tmp21)
tmp23 = tl.where(tmp8, tmp9, tmp22)
tmp24 = tl.where(tmp5, tmp6, tmp23)
tmp25 = tl.where(tmp4, tmp13, tmp24)
tmp29 = tmp0 == tmp7
tmp30 = tl.where(tmp29, tmp15, tmp14)
tmp31 = tl.where(tmp11, tmp30, tmp14)
tmp32 = tl.where(tmp2, tmp18, tmp31)
tmp33 = tl.where(tmp8, tmp30, tmp14)
tmp34 = tl.where(tmp8, tmp32, tmp33)
tmp35 = tl.where(tmp8, tmp28, tmp34)
tmp36 = tl.where(tmp8, tmp27, tmp35)
tmp37 = tl.where(tmp5, tmp26, tmp36)
tmp38 = tl.where(tmp2, tmp25, tmp37)
tl.store(out_ptr0 + x4, tmp38, xmask)
@triton.jit
def triton_poi_fused_fill_lift_fresh_ones_like_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 64
x2 = xindex // 16 % 4
x1 = xindex // 4 % 4
x5 = xindex
tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp0 = x3
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.full([1], 0, tl.int32)
tmp6 = tmp0 == tmp5
tmp9 = x2
tmp10 = tmp9 == tmp1
tmp11 = x1
tmp12 = tmp11 == tmp1
tmp13 = tmp5 == tmp5
tmp14 = tmp1 == tmp5
tmp15 = tmp11 == tmp5
tmp16 = -1.0
tmp17 = 1.0
tmp18 = tl.where(tmp15, tmp16, tmp17)
tmp19 = tl.where(tmp14, tmp18, tmp17)
tmp20 = tl.where(tmp13, tmp19, tmp17)
tmp21 = tl.where(tmp12, tmp16, tmp20)
tmp22 = tmp9 == tmp5
tmp23 = tl.where(tmp22, tmp18, tmp17)
tmp24 = tl.where(tmp13, tmp23, tmp17)
tmp25 = tl.where(tmp10, tmp21, tmp24)
tmp26 = tl.where(tmp6, tmp23, tmp17)
tmp27 = tl.where(tmp6, tmp25, tmp26)
tmp28 = tl.where(tmp6, tmp8, tmp27)
tmp29 = tl.where(tmp6, tmp7, tmp28)
tmp30 = tl.where(tmp2, tmp4, tmp29)
tmp31 = tl.where(tmp2, tmp3, tmp30)
tl.store(out_ptr0 + x5, tmp31, xmask)
@triton.jit
def triton_poi_fused_fill_lift_fresh_5(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x1 = xindex // 4 % 4
x3 = xindex % 16
x4 = xindex
tmp14 = tl.load(in_ptr0 + (96 + x3), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (112 + x3), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (64 + x3), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (128 + x3), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (64 + x4), xmask)
tmp36 = tl.load(in_ptr0 + (128 + x4), xmask)
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1], 2, tl.int32)
tmp6 = tl.full([1], 1, tl.int32)
tmp7 = tmp5 == tmp6
tmp8 = tl.full([1], 3, tl.int32)
tmp9 = tmp1 == tmp8
tmp10 = tmp3 == tmp8
tmp11 = tmp6 == tmp6
tmp12 = tmp8 == tmp5
tmp13 = tmp3 == tmp5
tmp15 = -1.0
tmp16 = tl.where(tmp13, tmp15, tmp14)
tmp18 = tl.where(tmp12, tmp16, tmp17)
tmp19 = tl.where(tmp11, tmp18, tmp17)
tmp20 = tl.where(tmp10, tmp15, tmp19)
tmp21 = tmp1 == tmp5
tmp23 = tl.where(tmp21, tmp16, tmp22)
tmp24 = tl.where(tmp11, tmp23, tmp22)
tmp25 = tl.where(tmp9, tmp20, tmp24)
tmp27 = tl.where(tmp7, tmp23, tmp26)
tmp28 = tl.where(tmp7, tmp25, tmp27)
tmp29 = tl.where(tmp4, tmp15, tmp28)
tmp30 = tmp0 == tmp8
tmp31 = tmp0 == tmp5
tmp33 = tl.where(tmp31, tmp16, tmp32)
tmp34 = tl.where(tmp11, tmp33, tmp32)
tmp35 = tl.where(tmp30, tmp20, tmp34)
tmp37 = tl.where(tmp7, tmp33, tmp36)
tmp38 = tl.where(tmp7, tmp35, tmp37)
tmp39 = tl.where(tmp2, tmp29, tmp38)
tl.store(out_ptr0 + x4, tmp39, xmask)
@triton.jit
def triton_poi_fused_fill_lift_fresh_6(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 64
x2 = xindex // 16 % 4
x1 = xindex // 4 % 4
x6 = xindex % 16
x7 = xindex
tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (96 + x6), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (112 + x6), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (64 + x4), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + x7, xmask)
tmp0 = x3
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = tl.full([1], 1, tl.int32)
tmp5 = tmp0 == tmp4
tmp6 = x2
tmp7 = tl.full([1], 3, tl.int32)
tmp8 = tmp6 == tmp7
tmp9 = x1
tmp10 = tmp9 == tmp7
tmp11 = tmp4 == tmp4
tmp12 = tmp7 == tmp1
tmp13 = tmp9 == tmp1
tmp15 = -1.0
tmp16 = tl.where(tmp13, tmp15, tmp14)
tmp18 = tl.where(tmp12, tmp16, tmp17)
tmp19 = tl.where(tmp11, tmp18, tmp17)
tmp20 = tl.where(tmp10, tmp15, tmp19)
tmp21 = tmp6 == tmp1
tmp23 = tl.where(tmp21, tmp16, tmp22)
tmp24 = tl.where(tmp11, tmp23, tmp22)
tmp25 = tl.where(tmp8, tmp20, tmp24)
tmp27 = tl.where(tmp5, tmp23, tmp26)
tmp28 = tl.where(tmp5, tmp25, tmp27)
tmp29 = tl.where(tmp2, tmp3, tmp28)
tl.store(out_ptr0 + x7, tmp29, xmask)
@triton.jit
def triton_poi_fused_fill_lift_fresh_7(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x1 = xindex // 4 % 4
x3 = xindex % 16
x4 = xindex
tmp12 = tl.load(in_ptr0 + (144 + x3), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (160 + x3), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (176 + x3), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (128 + x4), xmask)
tmp0 = x2
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1], 2, tl.int32)
tmp6 = tmp5 == tmp5
tmp7 = tmp1 == tmp5
tmp8 = tmp3 == tmp5
tmp9 = tl.full([1], 1, tl.int32)
tmp10 = tmp5 == tmp9
tmp11 = tmp3 == tmp9
tmp13 = -1.0
tmp14 = tl.where(tmp11, tmp13, tmp12)
tmp16 = tl.where(tmp10, tmp14, tmp15)
tmp17 = tl.where(tmp6, tmp16, tmp15)
tmp18 = tl.where(tmp8, tmp13, tmp17)
tmp19 = tmp1 == tmp9
tmp21 = tl.where(tmp19, tmp14, tmp20)
tmp22 = tl.where(tmp6, tmp21, tmp20)
tmp23 = tl.where(tmp7, tmp18, tmp22)
tmp24 = tl.where(tmp6, tmp23, tmp22)
tmp25 = tl.where(tmp4, tmp13, tmp24)
tmp26 = tmp0 == tmp5
tmp27 = tmp0 == tmp9
tmp29 = tl.where(tmp27, tmp14, tmp28)
tmp30 = tl.where(tmp6, tmp29, tmp28)
tmp31 = tl.where(tmp26, tmp18, tmp30)
tmp32 = tl.where(tmp6, tmp31, tmp30)
tmp33 = tl.where(tmp2, tmp25, tmp32)
tl.store(out_ptr0 + x4, tmp33, xmask)
@triton.jit
def triton_poi_fused_fill_lift_fresh_8(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 64
x2 = xindex // 16 % 4
x1 = xindex // 4 % 4
x6 = xindex % 16
x7 = xindex
tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (144 + x6), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (160 + x6), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (128 + x4), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + x7, xmask)
tmp0 = x3
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = x2
tmp5 = tmp4 == tmp1
tmp6 = x1
tmp7 = tmp6 == tmp1
tmp8 = tmp1 == tmp1
tmp9 = tl.full([1], 1, tl.int32)
tmp10 = tmp1 == tmp9
tmp11 = tmp6 == tmp9
tmp13 = -1.0
tmp14 = tl.where(tmp11, tmp13, tmp12)
tmp16 = tl.where(tmp10, tmp14, tmp15)
tmp17 = tl.where(tmp8, tmp16, tmp15)
tmp18 = tl.where(tmp7, tmp13, tmp17)
tmp19 = tmp4 == tmp9
tmp21 = tl.where(tmp19, tmp14, tmp20)
tmp22 = tl.where(tmp8, tmp21, tmp20)
tmp23 = tl.where(tmp5, tmp18, tmp22)
tmp25 = tl.where(tmp2, tmp21, tmp24)
tmp26 = tl.where(tmp2, tmp23, tmp25)
tmp27 = tl.where(tmp2, tmp3, tmp26)
tl.store(out_ptr0 + x7, tmp27, xmask)
@triton.jit
def triton_poi_fused_fill_lift_fresh_9(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x1 = xindex // 4 % 4
x3 = xindex % 16
x4 = xindex
tmp13 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (208 + x3), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (224 + x3), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (192 + x4), xmask)
tmp0 = x2
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = x1
tmp4 = tmp3 == tmp1
tmp5 = tl.full([1], 3, tl.int32)
tmp6 = tmp5 == tmp5
tmp7 = tl.full([1], 1, tl.int32)
tmp8 = tmp1 == tmp7
tmp9 = tmp3 == tmp7
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = tmp7 == tmp10
tmp12 = tmp3 == tmp10
tmp14 = -1.0
tmp15 = tl.where(tmp12, tmp14, tmp13)
tmp17 = tl.where(tmp11, tmp15, tmp16)
tmp18 = tl.where(tmp6, tmp17, tmp16)
tmp19 = tl.where(tmp9, tmp14, tmp18)
tmp20 = tmp1 == tmp10
tmp22 = tl.where(tmp20, tmp15, tmp21)
tmp23 = tl.where(tmp6, tmp22, tmp21)
tmp24 = tl.where(tmp8, tmp19, tmp23)
tmp25 = tl.where(tmp6, tmp24, tmp23)
tmp26 = tl.where(tmp4, tmp14, tmp25)
tmp27 = tmp0 == tmp7
tmp28 = tmp0 == tmp10
tmp30 = tl.where(tmp28, tmp15, tmp29)
tmp31 = tl.where(tmp6, tmp30, tmp29)
tmp32 = tl.where(tmp27, tmp19, tmp31)
tmp33 = tl.where(tmp6, tmp32, tmp31)
tmp34 = tl.where(tmp2, tmp26, tmp33)
tl.store(out_ptr0 + x4, tmp34, xmask)
@triton.jit
def triton_poi_fused_fill_lift_fresh_10(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 64
x2 = xindex // 16 % 4
x1 = xindex // 4 % 4
x6 = xindex % 16
x7 = xindex
tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (192 + x6), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (208 + x6), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (192 + x4), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr1 + x7, xmask)
tmp0 = x3
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = x2
tmp5 = tl.full([1], 1, tl.int32)
tmp6 = tmp4 == tmp5
tmp7 = x1
tmp8 = tmp7 == tmp5
tmp9 = tmp1 == tmp1
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = tmp5 == tmp10
tmp12 = tmp7 == tmp10
tmp14 = -1.0
tmp15 = tl.where(tmp12, tmp14, tmp13)
tmp17 = tl.where(tmp11, tmp15, tmp16)
tmp18 = tl.where(tmp9, tmp17, tmp16)
tmp19 = tl.where(tmp8, tmp14, tmp18)
tmp20 = tmp4 == tmp10
tmp22 = tl.where(tmp20, tmp15, tmp21)
tmp23 = tl.where(tmp9, tmp22, tmp21)
tmp24 = tl.where(tmp6, tmp19, tmp23)
tmp26 = tl.where(tmp2, tmp22, tmp25)
tmp27 = tl.where(tmp2, tmp24, tmp26)
tmp28 = tl.where(tmp2, tmp3, tmp27)
tl.store(out_ptr0 + x7, tmp28, xmask)
@triton.jit
def triton_poi_fused_fill_lift_fresh_max_mul_sum_11(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x1 = xindex // 4 % 4
x3 = xindex % 16
x4 = xindex
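    # Fused final stage: the last diagonal fill, the multiply by the input w,
    # and the first reduction over dim 1 all happen in-register. The loads at
    # x3 + 64 * x2 (+ 0/16/32/48) walk the four dim-1 slices of one sample,
    # yielding both the running maximum (out_ptr0) and the sum (out_ptr1).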
tmp7 = tl.load(in_ptr0 + (240 + x3), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (192 + x3), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp14 = tl.load(in_ptr1 + (x3 + 64 * x2), xmask)
tmp18 = tl.load(in_ptr0 + (208 + x3), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp22 = tl.load(in_ptr1 + (16 + x3 + 64 * x2), xmask)
tmp27 = tl.load(in_ptr0 + (224 + x3), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp31 = tl.load(in_ptr1 + (32 + x3 + 64 * x2), xmask)
tmp36 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp38 = tl.load(in_ptr1 + (48 + x3 + 64 * x2), xmask)
tmp0 = x2
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = tmp3 == tmp1
tmp5 = x1
tmp6 = tmp5 == tmp1
tmp8 = -1.0
tmp9 = tl.where(tmp6, tmp8, tmp7)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp13 = tl.where(tmp2, tmp11, tmp12)
tmp15 = tmp13 * tmp14
tmp16 = tl.full([1], 1, tl.int32)
tmp17 = tmp16 == tmp1
tmp19 = tl.where(tmp17, tmp9, tmp18)
tmp21 = tl.where(tmp2, tmp19, tmp20)
tmp23 = tmp21 * tmp22
tmp24 = triton_helpers.maximum(tmp15, tmp23)
tmp25 = tl.full([1], 2, tl.int32)
tmp26 = tmp25 == tmp1
tmp28 = tl.where(tmp26, tmp9, tmp27)
tmp30 = tl.where(tmp2, tmp28, tmp29)
tmp32 = tmp30 * tmp31
tmp33 = triton_helpers.maximum(tmp24, tmp32)
tmp34 = tmp1 == tmp1
tmp35 = tl.where(tmp34, tmp9, tmp7)
tmp37 = tl.where(tmp2, tmp35, tmp36)
tmp39 = tmp37 * tmp38
tmp40 = triton_helpers.maximum(tmp33, tmp39)
tmp41 = tmp15 + tmp23
tmp42 = tmp41 + tmp32
tmp43 = tmp42 + tmp39
tl.store(out_ptr0 + x4, tmp40, xmask)
tl.store(out_ptr1 + x4, tmp43, xmask)
@triton.jit
def triton_per_fused_add_max_mean_mul_sum_12(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
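    # The bare expressions below are dead statements kept verbatim from the
    # Inductor emission: this reduction runs as a single program (grid(1)),
    # so the x-index and the all-true masks are never consumed.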
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + (r0 + 16 * r1), None)
tmp1 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), None)
tmp3 = tl.load(in_ptr0 + (8 + r0 + 16 * r1), None)
tmp5 = tl.load(in_ptr0 + (12 + r0 + 16 * r1), None)
tmp7 = tl.load(in_ptr1 + (r0 + 16 * r1), None)
tmp8 = tl.load(in_ptr1 + (4 + r0 + 16 * r1), None)
tmp10 = tl.load(in_ptr1 + (8 + r0 + 16 * r1), None)
tmp12 = tl.load(in_ptr1 + (12 + r0 + 16 * r1), None)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp9 = tmp7 + tmp8
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp14 = 6e-07
tmp15 = tmp13 * tmp14
tmp16 = tmp6 + tmp15
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.sum(tmp17, 1)[:, None]
tmp20 = 16.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_fill_lift_fresh_0[grid(64)](buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_fill_lift_fresh_1[grid(64)](buf0, buf1, 64, XBLOCK
=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_fill_lift_fresh_2[grid(64)](buf1, buf0, buf2, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_fill_lift_fresh_3[grid(64)](buf2, buf1, buf0, buf3,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_fill_lift_fresh_ones_like_4[grid(256)](buf3, buf2,
buf1, buf0, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del buf1
buf5 = buf3
del buf3
triton_poi_fused_fill_lift_fresh_5[grid(64)](buf4, buf5, 64, XBLOCK
=64, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_fill_lift_fresh_6[grid(256)](buf5, buf4, buf6, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused_fill_lift_fresh_7[grid(64)](buf6, buf7, 64, XBLOCK
=64, num_warps=1, num_stages=1)
buf8 = buf4
del buf4
triton_poi_fused_fill_lift_fresh_8[grid(256)](buf7, buf6, buf8, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused_fill_lift_fresh_9[grid(64)](buf8, buf9, 64, XBLOCK
=64, num_warps=1, num_stages=1)
buf10 = buf6
del buf6
triton_poi_fused_fill_lift_fresh_10[grid(256)](buf9, buf8, buf10,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf8
buf11 = buf9
del buf9
buf12 = buf2
del buf2
triton_poi_fused_fill_lift_fresh_max_mul_sum_11[grid(64)](buf10,
arg0_1, buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del buf10
buf13 = empty_strided_cuda((), (), torch.float32)
buf14 = buf13
del buf13
triton_per_fused_add_max_mean_mul_sum_12[grid(1)](buf14, buf11,
buf12, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf11
del buf12
return buf14,
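# A quick equivalence check (an illustrative sketch, not part of the
# generated module; _check_against_eager is hypothetical and assumes a CUDA
# device): the fused pipeline should reproduce the eager CosLoss value with
# the default factor of 6e-07.
def _check_against_eager():
    w = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.ones_like(w)
    idx = torch.arange(4, device=w.device)
    mask[:, idx, idx] = -1.0
    nw = mask * w
    ref = (nw.max(dim=1).values.max(dim=1).values + 6e-07 * nw.sum(dim=(1, 2))
        ).mean()
    out = call([w.clone()])[0]
    assert torch.allclose(out, ref, atol=1e-05)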
class CosLossNew(nn.Module):
def __init__(self, factor=6e-07, havesum=True, havemax=True):
super(CosLossNew, self).__init__()
self.factor = factor
self.havesum = havesum
self.havemax = havemax
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| PatrickGui/Face_Pytorch | CosLoss | false | 971 | ["Apache-2.0"] | 0 | ff5b820ca3978883f7cf95f0209fba3ee958c939 | https://github.com/PatrickGui/Face_Pytorch/tree/ff5b820ca3978883f7cf95f0209fba3ee958c939 | import torch
from torch import nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, factor=6e-07, havesum=True, havemax=True):
super().__init__()
self.factor = factor
self.havesum = havesum
self.havemax = havemax
def forward(self, w):
mask = torch.ones_like(w)
for i in range(mask.shape[0]):
for j in range(mask.shape[1]):
mask[i, j, j] = -1
nw = mask * w
tmp, _ = torch.max(nw, dim=1)
tmp, _ = torch.max(tmp, dim=1)
if self.havesum and self.havemax:
            tmp_all = tmp + self.factor * torch.sum(torch.sum(nw, dim=1), dim=1)
elif self.havesum:
tmp_all = self.factor * torch.sum(torch.sum(nw, dim=1), dim=1)
else:
tmp_all = tmp
loss = torch.mean(tmp_all)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CumulativeLinkLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/6n/c6ncaq4deouortxdbxjqvks6ti64tknrwk6gyzg4iubs5lsdoh6o.py
# Topologically Sorted Source Nodes: [gather, likelihoods, log, neg_log_likelihood, loss], Original ATen: [aten.gather, aten.clamp, aten.log, aten.neg, aten.mean]
# Source node to ATen node mapping:
# gather => gather
# likelihoods => clamp_max, clamp_min, convert_element_type
# log => log
# loss => mean
# neg_log_likelihood => neg
# Graph fragment:
# %gather : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%arg0_1, 1, %unsqueeze), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%gather, 1e-15), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 0.999999999999999), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_max, torch.int64), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%convert_element_type,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%neg,), kwargs = {})
triton_per_fused_clamp_gather_log_mean_neg_0 = async_compile.triton('triton_per_fused_clamp_gather_log_mean_neg_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_gather_log_mean_neg_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_gather_log_mean_neg_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (tmp4 + (4*r0)), None, eviction_policy='evict_last')
tmp7 = tmp6.to(tl.float32)
tmp8 = 1e-15
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = 0.999999999999999
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp12 = tmp11.to(tl.int64)
tmp13 = tmp12.to(tl.float32)
tmp14 = tl_math.log(tmp13)
tmp15 = -tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp19 = 4.0
tmp20 = tmp18 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [gather, likelihoods, log, neg_log_likelihood, loss], Original ATen: [aten.gather, aten.clamp, aten.log, aten.neg, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_clamp_gather_log_mean_neg_0.run(buf1, arg1_1, arg0_1, 1, 4, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
arg1_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
from typing import Optional
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
def _reduction(loss: 'torch.Tensor', reduction: 'str') ->torch.Tensor:
"""
Reduce loss
Parameters
----------
loss : torch.Tensor, [batch_size, num_classes]
Batch losses.
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
Returns
-------
loss : torch.Tensor
Reduced loss.
"""
if reduction == 'elementwise_mean':
return loss.mean()
elif reduction == 'none':
return loss
elif reduction == 'sum':
return loss.sum()
else:
raise ValueError(f'{reduction} is not a valid reduction')
def cumulative_link_loss(y_pred: 'torch.Tensor', y_true: 'torch.Tensor',
reduction: 'str'='elementwise_mean', class_weights:
'Optional[np.ndarray]'=None) ->torch.Tensor:
"""
Calculates the negative log likelihood using the logistic cumulative link
function.
See "On the consistency of ordinal regression methods", Pedregosa et. al.
for more details. While this paper is not the first to introduce this, it
is the only one that I could find that was easily readable outside of
paywalls.
Parameters
----------
y_pred : torch.Tensor, [batch_size, num_classes]
Predicted target class probabilities. float dtype.
y_true : torch.Tensor, [batch_size, 1]
True target classes. long dtype.
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
class_weights : np.ndarray, [num_classes] optional (default=None)
An array of weights for each class. If included, then for each sample,
look up the true class and multiply that sample's loss by the weight in
this array.
Returns
-------
loss: torch.Tensor
"""
eps = 1e-15
likelihoods = torch.clamp(torch.gather(y_pred, 1, y_true.unsqueeze(1)),
eps, 1 - eps)
neg_log_likelihood = -torch.log(likelihoods)
if class_weights is not None:
        class_weights = torch.as_tensor(class_weights,
            dtype=neg_log_likelihood.dtype, device=neg_log_likelihood.device)
neg_log_likelihood *= class_weights[y_true]
loss = _reduction(neg_log_likelihood, reduction)
return loss
class CumulativeLinkLoss(nn.Module):
"""
Module form of cumulative_link_loss() loss function
Parameters
----------
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
class_weights : np.ndarray, [num_classes] optional (default=None)
An array of weights for each class. If included, then for each sample,
look up the true class and multiply that sample's loss by the weight in
this array.
"""
def __init__(self, reduction: 'str'='elementwise_mean', class_weights:
'Optional[torch.Tensor]'=None) ->None:
super().__init__()
self.class_weights = class_weights
self.reduction = reduction
def forward(self, y_pred: 'torch.Tensor', y_true: 'torch.Tensor'
) ->torch.Tensor:
return cumulative_link_loss(y_pred, y_true, reduction=self.
reduction, class_weights=self.class_weights)
def get_inputs():
return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4], dtype=
torch.int64)]
def get_init_inputs():
return [[], {}]
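# A tiny worked example (illustrative only; _uniform_example is not part of
# the original module): with uniform class probabilities every gathered
# likelihood is 0.25, so the loss collapses to -log(0.25) ~= 1.3863.
def _uniform_example():
    y_pred = torch.full((2, 4), 0.25)
    y_true = torch.tensor([1, 3])
    return cumulative_link_loss(y_pred, y_true)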
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
from torch import nn
from typing import Optional
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_clamp_gather_log_mean_neg_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
'evict_last')
tmp7 = tmp6.to(tl.float32)
tmp8 = 1e-15
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = 0.999999999999999
tmp11 = triton_helpers.minimum(tmp9, tmp10)
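    # The int64 round-trip below (tmp12/tmp13) mirrors the traced graph:
    # get_inputs() feeds int64 tensors, so the clamped value is cast back to
    # int64 before the log, an artifact of tracing with integer inputs.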
tmp12 = tmp11.to(tl.int64)
tmp13 = tmp12.to(tl.float32)
tmp14 = tl_math.log(tmp13)
tmp15 = -tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp19 = 4.0
tmp20 = tmp18 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_clamp_gather_log_mean_neg_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def _reduction(loss: 'torch.Tensor', reduction: 'str') ->torch.Tensor:
"""
Reduce loss
Parameters
----------
loss : torch.Tensor, [batch_size, num_classes]
Batch losses.
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
Returns
-------
loss : torch.Tensor
Reduced loss.
"""
if reduction == 'elementwise_mean':
return loss.mean()
elif reduction == 'none':
return loss
elif reduction == 'sum':
return loss.sum()
else:
raise ValueError(f'{reduction} is not a valid reduction')
def cumulative_link_loss(y_pred: 'torch.Tensor', y_true: 'torch.Tensor',
reduction: 'str'='elementwise_mean', class_weights:
'Optional[np.ndarray]'=None) ->torch.Tensor:
"""
Calculates the negative log likelihood using the logistic cumulative link
function.
See "On the consistency of ordinal regression methods", Pedregosa et. al.
for more details. While this paper is not the first to introduce this, it
is the only one that I could find that was easily readable outside of
paywalls.
Parameters
----------
y_pred : torch.Tensor, [batch_size, num_classes]
Predicted target class probabilities. float dtype.
y_true : torch.Tensor, [batch_size, 1]
True target classes. long dtype.
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
class_weights : np.ndarray, [num_classes] optional (default=None)
An array of weights for each class. If included, then for each sample,
look up the true class and multiply that sample's loss by the weight in
this array.
Returns
-------
loss: torch.Tensor
"""
eps = 1e-15
likelihoods = torch.clamp(torch.gather(y_pred, 1, y_true.unsqueeze(1)),
eps, 1 - eps)
neg_log_likelihood = -torch.log(likelihoods)
if class_weights is not None:
        class_weights = torch.as_tensor(class_weights,
            dtype=neg_log_likelihood.dtype, device=neg_log_likelihood.device)
neg_log_likelihood *= class_weights[y_true]
loss = _reduction(neg_log_likelihood, reduction)
return loss
class CumulativeLinkLossNew(nn.Module):
"""
Module form of cumulative_link_loss() loss function
Parameters
----------
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
class_weights : np.ndarray, [num_classes] optional (default=None)
An array of weights for each class. If included, then for each sample,
look up the true class and multiply that sample's loss by the weight in
this array.
"""
def __init__(self, reduction: 'str'='elementwise_mean', class_weights:
'Optional[torch.Tensor]'=None) ->None:
super().__init__()
self.class_weights = class_weights
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Ramstein/Retinopathy2 | CumulativeLinkLoss | false | 972 | ["MIT"] | 0 | 669e74206c466e6351d4e3df6087c6aa39b5c6c2 | https://github.com/Ramstein/Retinopathy2/tree/669e74206c466e6351d4e3df6087c6aa39b5c6c2 | import torch
import numpy as np
from torch import nn
from typing import Optional
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
def _reduction(loss: 'torch.Tensor', reduction: 'str') ->torch.Tensor:
"""
Reduce loss
Parameters
----------
loss : torch.Tensor, [batch_size, num_classes]
Batch losses.
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
Returns
-------
loss : torch.Tensor
Reduced loss.
"""
if reduction == 'elementwise_mean':
return loss.mean()
elif reduction == 'none':
return loss
elif reduction == 'sum':
return loss.sum()
else:
raise ValueError(f'{reduction} is not a valid reduction')
def cumulative_link_loss(y_pred: 'torch.Tensor', y_true: 'torch.Tensor',
reduction: 'str'='elementwise_mean', class_weights:
'Optional[np.ndarray]'=None) ->torch.Tensor:
"""
Calculates the negative log likelihood using the logistic cumulative link
function.
See "On the consistency of ordinal regression methods", Pedregosa et. al.
for more details. While this paper is not the first to introduce this, it
is the only one that I could find that was easily readable outside of
paywalls.
Parameters
----------
y_pred : torch.Tensor, [batch_size, num_classes]
Predicted target class probabilities. float dtype.
y_true : torch.Tensor, [batch_size, 1]
True target classes. long dtype.
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
class_weights : np.ndarray, [num_classes] optional (default=None)
An array of weights for each class. If included, then for each sample,
look up the true class and multiply that sample's loss by the weight in
this array.
Returns
-------
loss: torch.Tensor
"""
eps = 1e-15
likelihoods = torch.clamp(torch.gather(y_pred, 1, y_true.unsqueeze(1)),
eps, 1 - eps)
neg_log_likelihood = -torch.log(likelihoods)
if class_weights is not None:
        class_weights = torch.as_tensor(class_weights,
            dtype=neg_log_likelihood.dtype, device=neg_log_likelihood.device)
neg_log_likelihood *= class_weights[y_true]
loss = _reduction(neg_log_likelihood, reduction)
return loss
class Model(nn.Module):
"""
Module form of cumulative_link_loss() loss function
Parameters
----------
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
class_weights : np.ndarray, [num_classes] optional (default=None)
An array of weights for each class. If included, then for each sample,
look up the true class and multiply that sample's loss by the weight in
this array.
"""
def __init__(self, reduction: 'str'='elementwise_mean', class_weights:
'Optional[torch.Tensor]'=None) ->None:
super().__init__()
self.class_weights = class_weights
self.reduction = reduction
def forward(self, y_pred: 'torch.Tensor', y_true: 'torch.Tensor'
) ->torch.Tensor:
return cumulative_link_loss(y_pred, y_true, reduction=self.
reduction, class_weights=self.class_weights)
def get_inputs():
return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4], dtype=
torch.int64)]
def get_init_inputs():
return []
|
CustomBatchNormAutograd | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/up/cupno3pzyw77bw3jxggqthxk5iageb5gntveyyc7vpvocufttkij.py
# Topologically Sorted Source Nodes: [sum_1, mean, var, sub, add, sqrt, norm, mul_1, out], Original ATen: [aten.sum, aten.mul, aten.var, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# mean => mul
# mul_1 => mul_1
# norm => div
# out => add_1
# sqrt => sqrt
# sub => sub
# sum_1 => sum_1
# var => var
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [0]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 0.25), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [0]), kwargs = {correction: 0})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mul), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0 = async_compile.triton('triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask)
tmp2 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (64 + x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (128 + x5), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (192 + x5), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = tmp1 - tmp10
tmp12 = 4.0
tmp13 = tmp8 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp3 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp5 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp7 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = tmp11 / tmp28
tmp30 = tmp0 * tmp29
tmp32 = tmp30 + tmp31
tl.store(in_out_ptr0 + (x4), tmp32, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sum_1, mean, var, sub, add, sqrt, norm, mul_1, out], Original ATen: [aten.sum, aten.mul, aten.var, aten.sub, aten.add, aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0.run(buf1, primals_2, primals_1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf1, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CustomBatchNormAutograd(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
The operations called in self.forward track the history if the input tensors have the
flag requires_grad set to True.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormAutograd object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
"""
super(CustomBatchNormAutograd, self).__init__()
self.n_neurons = n_neurons
self.eps = eps
self.beta = nn.Parameter(torch.zeros(self.n_neurons))
self.gamma = nn.Parameter(torch.ones(self.n_neurons))
def forward(self, input):
"""
Compute the batch normalization
Args:
input: input tensor of shape (n_batch, n_neurons)
Returns:
out: batch-normalized tensor
"""
batch_size = input.shape[0]
        assert input.shape[1] == self.n_neurons, 'Input not in the correct shape'
mean = 1 / batch_size * torch.sum(input, dim=0)
var = input.var(dim=0, unbiased=False)
norm = (input - mean) / torch.sqrt(var + self.eps)
out = self.gamma * norm + self.beta
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_neurons': 4}]
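# A quick cross-check (a sketch under assumptions; _check_against_batchnorm1d
# is not part of the original module): in training mode with default affine
# parameters, the custom layer should agree with torch.nn.BatchNorm1d on the
# 2D inputs the docstring describes.
def _check_against_batchnorm1d():
    custom = CustomBatchNormAutograd(4)
    ref = nn.BatchNorm1d(4, eps=1e-05, momentum=None)
    x = torch.randn(8, 4)
    return torch.allclose(custom(x), ref(x), atol=1e-06)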
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex
x5 = xindex % 64
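    # One lane per output element: the four loads at x5, 64 + x5, 128 + x5
    # and 192 + x5 fetch the same (C, H, W) position across the batch of 4;
    # the mean (tmp10/tmp13) and biased variance (tmp25) are formed inline
    # before the affine step gamma * norm + beta.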
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask)
tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (64 + x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (128 + x5), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (192 + x5), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = tmp1 - tmp10
tmp12 = 4.0
tmp13 = tmp8 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp3 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp5 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp7 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = tmp11 / tmp28
tmp30 = tmp0 * tmp29
tmp32 = tmp30 + tmp31
tl.store(in_out_ptr0 + x4, tmp32, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0[grid(256)](buf1,
primals_2, primals_1, primals_3, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf1, primals_1
class CustomBatchNormAutogradNew(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
The operations called in self.forward track the history if the input tensors have the
flag requires_grad set to True.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormAutograd object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
"""
super(CustomBatchNormAutogradNew, self).__init__()
self.n_neurons = n_neurons
self.eps = eps
self.beta = nn.Parameter(torch.zeros(self.n_neurons))
self.gamma = nn.Parameter(torch.ones(self.n_neurons))
def forward(self, input_0):
primals_2 = self.beta
primals_3 = self.gamma
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| RaymondKoopmanschap/DL_assignment_code | CustomBatchNormAutograd | false | 973 | ["MIT"] | 0 | 68b3290be9fbd6c55433a7585e2cfa18e0f35f5c | https://github.com/RaymondKoopmanschap/DL_assignment_code/tree/68b3290be9fbd6c55433a7585e2cfa18e0f35f5c | import torch
import torch.nn as nn
class Model(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
The operations called in self.forward track the history if the input tensors have the
flag requires_grad set to True.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormAutograd object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
"""
super().__init__()
self.n_neurons = n_neurons
self.eps = eps
self.beta = nn.Parameter(torch.zeros(self.n_neurons))
self.gamma = nn.Parameter(torch.ones(self.n_neurons))
def forward(self, input):
"""
Compute the batch normalization
Args:
input: input tensor of shape (n_batch, n_neurons)
Returns:
out: batch-normalized tensor
"""
batch_size = input.shape[0]
        assert input.shape[1] == self.n_neurons, 'Input not in the correct shape'
mean = 1 / batch_size * torch.sum(input, dim=0)
var = input.var(dim=0, unbiased=False)
norm = (input - mean) / torch.sqrt(var + self.eps)
out = self.gamma * norm + self.beta
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MaxPool | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ud/cude6zl4nio2ly5l3l5cwlmxkoqtt4qkekbvrzk6nz7rpwc6ypf3.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 3) % 3
x0 = xindex % 3
x2 = (xindex // 9)
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=float("-inf"))
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4) + x0 + (4*x1) + (16*x2)), tmp16 & xmask, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-3) + x0 + (4*x1) + (16*x2)), tmp23 & xmask, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp5 & tmp29
tmp31 = tl.load(in_ptr0 + ((-2) + x0 + (4*x1) + (16*x2)), tmp30 & xmask, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = x1
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp36 & tmp9
tmp38 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1) + (16*x2)), tmp37 & xmask, other=float("-inf"))
tmp39 = triton_helpers.maximum(tmp38, tmp32)
tmp40 = tmp36 & tmp15
tmp41 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp40 & xmask, other=float("-inf"))
tmp42 = triton_helpers.maximum(tmp41, tmp39)
tmp43 = tmp36 & tmp22
tmp44 = tl.load(in_ptr0 + (1 + x0 + (4*x1) + (16*x2)), tmp43 & xmask, other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp42)
tmp46 = tmp36 & tmp29
tmp47 = tl.load(in_ptr0 + (2 + x0 + (4*x1) + (16*x2)), tmp46 & xmask, other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = 1 + x1
tmp50 = tmp49 >= tmp1
tmp51 = tmp49 < tmp3
tmp52 = tmp50 & tmp51
tmp53 = tmp52 & tmp9
tmp54 = tl.load(in_ptr0 + (3 + x0 + (4*x1) + (16*x2)), tmp53 & xmask, other=float("-inf"))
tmp55 = triton_helpers.maximum(tmp54, tmp48)
tmp56 = tmp52 & tmp15
tmp57 = tl.load(in_ptr0 + (4 + x0 + (4*x1) + (16*x2)), tmp56 & xmask, other=float("-inf"))
tmp58 = triton_helpers.maximum(tmp57, tmp55)
tmp59 = tmp52 & tmp22
tmp60 = tl.load(in_ptr0 + (5 + x0 + (4*x1) + (16*x2)), tmp59 & xmask, other=float("-inf"))
tmp61 = triton_helpers.maximum(tmp60, tmp58)
tmp62 = tmp52 & tmp29
tmp63 = tl.load(in_ptr0 + (6 + x0 + (4*x1) + (16*x2)), tmp62 & xmask, other=float("-inf"))
tmp64 = triton_helpers.maximum(tmp63, tmp61)
tmp65 = 2 + x1
tmp66 = tmp65 >= tmp1
tmp67 = tmp65 < tmp3
tmp68 = tmp66 & tmp67
tmp69 = tmp68 & tmp9
tmp70 = tl.load(in_ptr0 + (7 + x0 + (4*x1) + (16*x2)), tmp69 & xmask, other=float("-inf"))
tmp71 = triton_helpers.maximum(tmp70, tmp64)
tmp72 = tmp68 & tmp15
tmp73 = tl.load(in_ptr0 + (8 + x0 + (4*x1) + (16*x2)), tmp72 & xmask, other=float("-inf"))
tmp74 = triton_helpers.maximum(tmp73, tmp71)
tmp75 = tmp68 & tmp22
tmp76 = tl.load(in_ptr0 + (9 + x0 + (4*x1) + (16*x2)), tmp75 & xmask, other=float("-inf"))
tmp77 = triton_helpers.maximum(tmp76, tmp74)
tmp78 = tmp68 & tmp29
tmp79 = tl.load(in_ptr0 + (10 + x0 + (4*x1) + (16*x2)), tmp78 & xmask, other=float("-inf"))
tmp80 = triton_helpers.maximum(tmp79, tmp77)
tl.store(out_ptr0 + (x4), tmp80, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 144, grid=grid(144), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class MaxPool(nn.Module):
def __init__(self, kernel_size, stride=1, padding=1, zero_pad=False):
super(MaxPool, self).__init__()
self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None
self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding)
def forward(self, x):
if self.zero_pad:
x = self.zero_pad(x)
x = self.pool(x)
if self.zero_pad:
x = x[:, :, 1:, 1:]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3 % 3
x0 = xindex % 3
x2 = xindex // 9
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=float('-inf'))
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + x0 + 4 * x1 + 16 * x2), tmp16 & xmask,
other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + x0 + 4 * x1 + 16 * x2), tmp23 & xmask,
other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp5 & tmp29
tmp31 = tl.load(in_ptr0 + (-2 + x0 + 4 * x1 + 16 * x2), tmp30 & xmask,
other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = x1
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp36 & tmp9
tmp38 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1 + 16 * x2), tmp37 & xmask,
other=float('-inf'))
tmp39 = triton_helpers.maximum(tmp38, tmp32)
tmp40 = tmp36 & tmp15
tmp41 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp40 & xmask, other
=float('-inf'))
tmp42 = triton_helpers.maximum(tmp41, tmp39)
tmp43 = tmp36 & tmp22
tmp44 = tl.load(in_ptr0 + (1 + x0 + 4 * x1 + 16 * x2), tmp43 & xmask,
other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp42)
tmp46 = tmp36 & tmp29
tmp47 = tl.load(in_ptr0 + (2 + x0 + 4 * x1 + 16 * x2), tmp46 & xmask,
other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = 1 + x1
tmp50 = tmp49 >= tmp1
tmp51 = tmp49 < tmp3
tmp52 = tmp50 & tmp51
tmp53 = tmp52 & tmp9
tmp54 = tl.load(in_ptr0 + (3 + x0 + 4 * x1 + 16 * x2), tmp53 & xmask,
other=float('-inf'))
tmp55 = triton_helpers.maximum(tmp54, tmp48)
tmp56 = tmp52 & tmp15
tmp57 = tl.load(in_ptr0 + (4 + x0 + 4 * x1 + 16 * x2), tmp56 & xmask,
other=float('-inf'))
tmp58 = triton_helpers.maximum(tmp57, tmp55)
tmp59 = tmp52 & tmp22
tmp60 = tl.load(in_ptr0 + (5 + x0 + 4 * x1 + 16 * x2), tmp59 & xmask,
other=float('-inf'))
tmp61 = triton_helpers.maximum(tmp60, tmp58)
tmp62 = tmp52 & tmp29
tmp63 = tl.load(in_ptr0 + (6 + x0 + 4 * x1 + 16 * x2), tmp62 & xmask,
other=float('-inf'))
tmp64 = triton_helpers.maximum(tmp63, tmp61)
tmp65 = 2 + x1
tmp66 = tmp65 >= tmp1
tmp67 = tmp65 < tmp3
tmp68 = tmp66 & tmp67
tmp69 = tmp68 & tmp9
tmp70 = tl.load(in_ptr0 + (7 + x0 + 4 * x1 + 16 * x2), tmp69 & xmask,
other=float('-inf'))
tmp71 = triton_helpers.maximum(tmp70, tmp64)
tmp72 = tmp68 & tmp15
tmp73 = tl.load(in_ptr0 + (8 + x0 + 4 * x1 + 16 * x2), tmp72 & xmask,
other=float('-inf'))
tmp74 = triton_helpers.maximum(tmp73, tmp71)
tmp75 = tmp68 & tmp22
tmp76 = tl.load(in_ptr0 + (9 + x0 + 4 * x1 + 16 * x2), tmp75 & xmask,
other=float('-inf'))
tmp77 = triton_helpers.maximum(tmp76, tmp74)
tmp78 = tmp68 & tmp29
tmp79 = tl.load(in_ptr0 + (10 + x0 + 4 * x1 + 16 * x2), tmp78 & xmask,
other=float('-inf'))
tmp80 = triton_helpers.maximum(tmp79, tmp77)
tl.store(out_ptr0 + x4, tmp80, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(144)](arg0_1, buf0,
144, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
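# Commentary (added): the launch writes a (4, 4, 3, 3) buffer because a 4x4
# max pool with stride 1 and padding 1 over a 4x4 map yields
# floor((4 + 2*1 - 4) / 1) + 1 = 3 per spatial side, i.e.
# 4 * 4 * 3 * 3 = 144 elements, matching xnumel and grid(144) above.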
class MaxPoolNew(nn.Module):
def __init__(self, kernel_size, stride=1, padding=1, zero_pad=False):
super(MaxPoolNew, self).__init__()
self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None
self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Ramstein/Retinopathy2 | MaxPool | false | 974 | [
"MIT"
] | 0 | 669e74206c466e6351d4e3df6087c6aa39b5c6c2 | https://github.com/Ramstein/Retinopathy2/tree/669e74206c466e6351d4e3df6087c6aa39b5c6c2 | import torch
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self, kernel_size, stride=1, padding=1, zero_pad=False):
super().__init__()
self.zero_pad = nn.ZeroPad2d((1, 0, 1, 0)) if zero_pad else None
self.pool = nn.MaxPool2d(kernel_size, stride=stride, padding=padding)
def forward(self, x):
if self.zero_pad:
x = self.zero_pad(x)
x = self.pool(x)
if self.zero_pad:
x = x[:, :, 1:, 1:]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
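# Hedged usage sketch (my addition): with zero_pad=True the input is padded on
# the top/left only, pooled, and the first row/column of the result is
# cropped, shifting every pooling window by one pixel versus plain padding.
m = Model(kernel_size=4, zero_pad=True)
y = m(torch.rand(4, 4, 4, 4))
print(y.shape)  # torch.Size([4, 4, 3, 3]) after the crop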
|
LinearEnsemble | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/jv/cjvcsredzlnp5p23u3wgkkflope6kvqewy3nepikau7sddqcldfj.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm, %unsqueeze), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch as T
import torch.nn as nn
class LinearEnsemble(nn.Module):
__constants__ = ['in_features', 'out_features']
ensemble_size: 'int'
in_features: 'int'
out_features: 'int'
weight: 'T.Tensor'
def __init__(self, ensemble_size: 'int', in_features: 'int',
out_features: 'int', weight_decay: 'float'=0.0, bias: 'bool'=True
) ->None:
super(LinearEnsemble, self).__init__()
self.ensemble_size = ensemble_size
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(T.Tensor(ensemble_size, in_features,
out_features))
self.weight_decay = weight_decay
if bias:
self.bias = nn.Parameter(T.Tensor(ensemble_size, out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) ->None:
pass
def forward(self, x: 'T.Tensor') ->T.Tensor:
return T.add(T.bmm(x, self.weight), self.bias[:, None, :])
def extra_repr(self) ->str:
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'ensemble_size': 4, 'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch as T
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
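# Commentary (added): in_out_ptr0 holds bmm(x, weight) of shape (4, 4, 4);
# x0 is the output feature and x2 the ensemble index, so
# in_ptr0[x0 + 4 * x2] reads bias[ensemble, out_feature] and broadcasts it
# over the batch dimension.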
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
return buf1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0)
class LinearEnsembleNew(nn.Module):
__constants__ = ['in_features', 'out_features']
ensemble_size: 'int'
in_features: 'int'
out_features: 'int'
weight: 'T.Tensor'
def __init__(self, ensemble_size: 'int', in_features: 'int',
out_features: 'int', weight_decay: 'float'=0.0, bias: 'bool'=True
) ->None:
super(LinearEnsembleNew, self).__init__()
self.ensemble_size = ensemble_size
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(T.Tensor(ensemble_size, in_features,
out_features))
self.weight_decay = weight_decay
if bias:
self.bias = nn.Parameter(T.Tensor(ensemble_size, out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) ->None:
pass
def extra_repr(self) ->str:
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| RamiSketcher/AMMI-RL | LinearEnsemble | false | 975 | [
"MIT"
] | 0 | 6d51587ff4d5dc14cba87fca561bd7b340b44586 | https://github.com/RamiSketcher/AMMI-RL/tree/6d51587ff4d5dc14cba87fca561bd7b340b44586 | import torch
import torch as T
import torch.nn as nn
class Model(nn.Module):
__constants__ = ['in_features', 'out_features']
ensemble_size: 'int'
in_features: 'int'
out_features: 'int'
weight: 'T.Tensor'
def __init__(self, ensemble_size: 'int', in_features: 'int',
out_features: 'int', weight_decay: 'float'=0.0, bias: 'bool'=True
) ->None:
super().__init__()
self.ensemble_size = ensemble_size
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(T.Tensor(ensemble_size, in_features,
out_features))
self.weight_decay = weight_decay
if bias:
self.bias = nn.Parameter(T.Tensor(ensemble_size, out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) ->None:
pass
def forward(self, x: 'T.Tensor') ->T.Tensor:
return T.add(T.bmm(x, self.weight), self.bias[:, None, :])
def extra_repr(self) ->str:
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
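# Hedged semantics sketch (my addition): bmm applies one independent linear
# map per ensemble member. reset_parameters() above is a no-op, so the
# parameters start as uninitialized memory; they are overwritten here purely
# to check shapes and broadcasting.
m = Model(4, 4, 4)
with torch.no_grad():
    m.weight.normal_()
    m.bias.zero_()
x = torch.rand(4, 2, 4)  # (ensemble_size, batch, in_features)
out = m(x)               # (ensemble_size, batch, out_features)
assert torch.allclose(out[0], x[0] @ m.weight[0] + m.bias[0])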
|
WordPredictor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/dp/cdpfrqskwwuqnfeupok3qgc45wzitvxhdnpcf5uabibiblorlnoa.py
# Topologically Sorted Source Nodes: [hidden, mean_hidden, max_1, add], Original ATen: [aten.relu, aten.mean, aten.max, aten.add]
# Source node to ATen node mapping:
# add => add
# hidden => relu
# max_1 => max_1
# mean_hidden => mean
# Graph fragment:
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%relu, [0]), kwargs = {})
# %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.dim](args = (%relu, 0), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %getitem), kwargs = {})
triton_poi_fused_add_max_mean_relu_0 = async_compile.triton('triton_poi_fused_add_max_mean_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_max_mean_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_max_mean_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp23 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp40 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = tmp4 > tmp7
tmp9 = tmp4 == tmp7
tmp10 = tmp4 != tmp4
tmp11 = tmp7 != tmp7
tmp12 = tmp10 > tmp11
tmp13 = tmp8 | tmp12
tmp14 = tmp10 & tmp11
tmp15 = tmp9 | tmp14
tmp16 = tl.full([1], 0, tl.int64)
tmp17 = tl.full([1], 1, tl.int64)
tmp18 = tmp16 < tmp17
tmp19 = tmp15 & tmp18
tmp20 = tmp13 | tmp19
tmp21 = tl.where(tmp20, tmp4, tmp7)
tmp22 = tl.where(tmp20, tmp16, tmp17)
tmp24 = tmp23 + tmp1
tmp25 = triton_helpers.maximum(tmp3, tmp24)
tmp26 = tmp21 > tmp25
tmp27 = tmp21 == tmp25
tmp28 = tmp21 != tmp21
tmp29 = tmp25 != tmp25
tmp30 = tmp28 > tmp29
tmp31 = tmp26 | tmp30
tmp32 = tmp28 & tmp29
tmp33 = tmp27 | tmp32
tmp34 = tl.full([1], 2, tl.int64)
tmp35 = tmp22 < tmp34
tmp36 = tmp33 & tmp35
tmp37 = tmp31 | tmp36
tmp38 = tl.where(tmp37, tmp21, tmp25)
tmp39 = tl.where(tmp37, tmp22, tmp34)
tmp41 = tmp40 + tmp1
tmp42 = triton_helpers.maximum(tmp3, tmp41)
tmp43 = tmp38 > tmp42
tmp44 = tmp38 == tmp42
tmp45 = tmp38 != tmp38
tmp46 = tmp42 != tmp42
tmp47 = tmp45 > tmp46
tmp48 = tmp43 | tmp47
tmp49 = tmp45 & tmp46
tmp50 = tmp44 | tmp49
tmp51 = tl.full([1], 3, tl.int64)
tmp52 = tmp39 < tmp51
tmp53 = tmp50 & tmp52
tmp54 = tmp48 | tmp53
tmp55 = tl.where(tmp54, tmp38, tmp42)
tmp56 = tl.where(tmp54, tmp39, tmp51)
tmp57 = tmp4 + tmp7
tmp58 = tmp57 + tmp25
tmp59 = tmp58 + tmp42
tmp60 = 4.0
tmp61 = tmp59 / tmp60
tmp62 = triton_helpers.maximum(tmp4, tmp7)
tmp63 = triton_helpers.maximum(tmp62, tmp25)
tmp64 = triton_helpers.maximum(tmp63, tmp42)
tmp65 = tmp61 + tmp64
tl.store(out_ptr0 + (x2), tmp56, xmask)
tl.store(out_ptr1 + (x2), tmp65, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/i3/ci32bshm7vv6yycmhvqgk6df7gy4rk2dkcyol7iwwj7ttakuvnhx.py
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hidden => relu
# Graph fragment:
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden, mean_hidden, max_1, add], Original ATen: [aten.relu, aten.mean, aten.max, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_max_mean_relu_0.run(buf0, primals_3, buf1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf0, primals_3, buf4, 64, grid=grid(64), stream=stream0)
del buf0
del primals_3
return (buf3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf2, primals_4, reinterpret_tensor(buf1, (1, 4, 4), (16, 4, 1), 0), buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
class WordPredictor(nn.Module):
def __init__(self, encoder_output_dim, hidden_dim, output_dim,
topk_labels_per_source_token=None, use_self_attention=False):
super().__init__()
self.encoder_output_dim = encoder_output_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.topk_labels_per_source_token = topk_labels_per_source_token
self.use_self_attention = use_self_attention
if self.use_self_attention:
self.init_layer = nn.Linear(encoder_output_dim, encoder_output_dim)
self.attn_layer = nn.Linear(2 * encoder_output_dim, 1)
self.hidden_layer = nn.Linear(2 * encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
else:
self.hidden_layer = nn.Linear(encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
def forward(self, encoder_output):
encoder_hiddens, *_ = encoder_output
assert encoder_hiddens.dim()
if self.use_self_attention:
init_state = self._get_init_state(encoder_hiddens)
attn_scores = self._attention(encoder_hiddens, init_state)
attned_state = (encoder_hiddens * attn_scores).sum(0)
pred_input = torch.cat([init_state, attned_state], 1)
pred_hidden = F.relu(self.hidden_layer(pred_input))
logits = self.output_layer(pred_hidden)
else:
hidden = F.relu(self.hidden_layer(encoder_hiddens))
mean_hidden = torch.mean(hidden, 0)
max_hidden = torch.max(hidden, 0)[0]
logits = self.output_layer(mean_hidden + max_hidden)
return logits
def _get_init_state(self, encoder_hiddens):
x = torch.mean(encoder_hiddens, 0)
x = F.relu(self.init_layer(x))
return x
def _attention(self, encoder_hiddens, init_state):
init_state = init_state.unsqueeze(0).expand_as(encoder_hiddens)
attn_input = torch.cat([init_state, encoder_hiddens], 2)
attn_scores = F.relu(self.attn_layer(attn_input))
attn_scores = F.softmax(attn_scores, 0)
return attn_scores
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output
if log_probs:
return F.log_softmax(logits, dim=1)
else:
return F.softmax(logits, dim=1)
def get_topk_predicted_tokens(self, net_output, src_tokens, log_probs:
'bool'):
"""
Get self.topk_labels_per_source_token top predicted words for vocab
reduction (per source token).
"""
assert isinstance(self.topk_labels_per_source_token, int
) and self.topk_labels_per_source_token > 0, 'topk_labels_per_source_token must be a positive int, or None'
k = src_tokens.size(1) * self.topk_labels_per_source_token
probs = self.get_normalized_probs(net_output, log_probs)
_, topk_indices = torch.topk(probs, k, dim=1)
return topk_indices
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'encoder_output_dim': 4, 'hidden_dim': 4, 'output_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_max_mean_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x2), xmask)
tmp23 = tl.load(in_ptr0 + (32 + x2), xmask)
tmp40 = tl.load(in_ptr0 + (48 + x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = tmp4 > tmp7
tmp9 = tmp4 == tmp7
tmp10 = tmp4 != tmp4
tmp11 = tmp7 != tmp7
tmp12 = tmp10 > tmp11
tmp13 = tmp8 | tmp12
tmp14 = tmp10 & tmp11
tmp15 = tmp9 | tmp14
tmp16 = tl.full([1], 0, tl.int64)
tmp17 = tl.full([1], 1, tl.int64)
tmp18 = tmp16 < tmp17
tmp19 = tmp15 & tmp18
tmp20 = tmp13 | tmp19
tmp21 = tl.where(tmp20, tmp4, tmp7)
tmp22 = tl.where(tmp20, tmp16, tmp17)
tmp24 = tmp23 + tmp1
tmp25 = triton_helpers.maximum(tmp3, tmp24)
tmp26 = tmp21 > tmp25
tmp27 = tmp21 == tmp25
tmp28 = tmp21 != tmp21
tmp29 = tmp25 != tmp25
tmp30 = tmp28 > tmp29
tmp31 = tmp26 | tmp30
tmp32 = tmp28 & tmp29
tmp33 = tmp27 | tmp32
tmp34 = tl.full([1], 2, tl.int64)
tmp35 = tmp22 < tmp34
tmp36 = tmp33 & tmp35
tmp37 = tmp31 | tmp36
tmp38 = tl.where(tmp37, tmp21, tmp25)
tmp39 = tl.where(tmp37, tmp22, tmp34)
tmp41 = tmp40 + tmp1
tmp42 = triton_helpers.maximum(tmp3, tmp41)
tmp43 = tmp38 > tmp42
tmp44 = tmp38 == tmp42
tmp45 = tmp38 != tmp38
tmp46 = tmp42 != tmp42
tmp47 = tmp45 > tmp46
tmp48 = tmp43 | tmp47
tmp49 = tmp45 & tmp46
tmp50 = tmp44 | tmp49
tmp51 = tl.full([1], 3, tl.int64)
tmp52 = tmp39 < tmp51
tmp53 = tmp50 & tmp52
tmp54 = tmp48 | tmp53
tl.where(tmp54, tmp38, tmp42)
tmp56 = tl.where(tmp54, tmp39, tmp51)
tmp57 = tmp4 + tmp7
tmp58 = tmp57 + tmp25
tmp59 = tmp58 + tmp42
tmp60 = 4.0
tmp61 = tmp59 / tmp60
tmp62 = triton_helpers.maximum(tmp4, tmp7)
tmp63 = triton_helpers.maximum(tmp62, tmp25)
tmp64 = triton_helpers.maximum(tmp63, tmp42)
tmp65 = tmp61 + tmp64
tl.store(out_ptr0 + x2, tmp56, xmask)
tl.store(out_ptr1 + x2, tmp65, xmask)
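# Commentary (added): the chain of >, ==, and != comparisons above is a
# NaN-aware running max with index tracking over the four time slices of
# relu(x @ W + b) (offsets 0, 16, 32, 48); tmp56 is the argmax written to
# out_ptr0, while out_ptr1 receives tmp65 = mean(relu) + max(relu).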
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_max_mean_relu_0[grid(16)](buf0, primals_3,
buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf2, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf0,
primals_3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_3
return buf3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf2, primals_4, reinterpret_tensor(buf1, (1, 4, 4), (16, 4, 1), 0
), buf4
class WordPredictorNew(nn.Module):
def __init__(self, encoder_output_dim, hidden_dim, output_dim,
topk_labels_per_source_token=None, use_self_attention=False):
super().__init__()
self.encoder_output_dim = encoder_output_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.topk_labels_per_source_token = topk_labels_per_source_token
self.use_self_attention = use_self_attention
if self.use_self_attention:
self.init_layer = nn.Linear(encoder_output_dim, encoder_output_dim)
self.attn_layer = nn.Linear(2 * encoder_output_dim, 1)
self.hidden_layer = nn.Linear(2 * encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
else:
self.hidden_layer = nn.Linear(encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
def _get_init_state(self, encoder_hiddens):
x = torch.mean(encoder_hiddens, 0)
x = F.relu(self.init_layer(x))
return x
def _attention(self, encoder_hiddens, init_state):
init_state = init_state.unsqueeze(0).expand_as(encoder_hiddens)
attn_input = torch.cat([init_state, encoder_hiddens], 2)
attn_scores = F.relu(self.attn_layer(attn_input))
attn_scores = F.softmax(attn_scores, 0)
return attn_scores
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output
if log_probs:
return F.log_softmax(logits, dim=1)
else:
return F.softmax(logits, dim=1)
def get_topk_predicted_tokens(self, net_output, src_tokens, log_probs:
'bool'):
"""
Get self.topk_labels_per_source_token top predicted words for vocab
reduction (per source token).
"""
assert isinstance(self.topk_labels_per_source_token, int
) and self.topk_labels_per_source_token > 0, 'topk_labels_per_source_token must be a positive int, or None'
k = src_tokens.size(1) * self.topk_labels_per_source_token
probs = self.get_normalized_probs(net_output, log_probs)
_, topk_indices = torch.topk(probs, k, dim=1)
return topk_indices
def forward(self, input_0):
primals_2 = self.hidden_layer.weight
primals_3 = self.hidden_layer.bias
primals_4 = self.output_layer.weight
primals_5 = self.output_layer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ROCmSoftwarePlatform/translate | WordPredictor | false | 976 | [
"BSD-3-Clause"
] | 0 | 32a6380d914ebe1a6c38c4992aac9600ed3d9810 | https://github.com/ROCmSoftwarePlatform/translate/tree/32a6380d914ebe1a6c38c4992aac9600ed3d9810 | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
class Model(nn.Module):
def __init__(self, encoder_output_dim, hidden_dim, output_dim,
topk_labels_per_source_token=None, use_self_attention=False):
super().__init__()
self.encoder_output_dim = encoder_output_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.topk_labels_per_source_token = topk_labels_per_source_token
self.use_self_attention = use_self_attention
if self.use_self_attention:
self.init_layer = nn.Linear(encoder_output_dim, encoder_output_dim)
self.attn_layer = nn.Linear(2 * encoder_output_dim, 1)
self.hidden_layer = nn.Linear(2 * encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
else:
self.hidden_layer = nn.Linear(encoder_output_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
def forward(self, encoder_output):
encoder_hiddens, *_ = encoder_output
assert encoder_hiddens.dim()
if self.use_self_attention:
init_state = self._get_init_state(encoder_hiddens)
attn_scores = self._attention(encoder_hiddens, init_state)
attned_state = (encoder_hiddens * attn_scores).sum(0)
pred_input = torch.cat([init_state, attned_state], 1)
pred_hidden = F.relu(self.hidden_layer(pred_input))
logits = self.output_layer(pred_hidden)
else:
hidden = F.relu(self.hidden_layer(encoder_hiddens))
mean_hidden = torch.mean(hidden, 0)
max_hidden = torch.max(hidden, 0)[0]
logits = self.output_layer(mean_hidden + max_hidden)
return logits
def _get_init_state(self, encoder_hiddens):
x = torch.mean(encoder_hiddens, 0)
x = F.relu(self.init_layer(x))
return x
def _attention(self, encoder_hiddens, init_state):
init_state = init_state.unsqueeze(0).expand_as(encoder_hiddens)
attn_input = torch.cat([init_state, encoder_hiddens], 2)
attn_scores = F.relu(self.attn_layer(attn_input))
attn_scores = F.softmax(attn_scores, 0)
return attn_scores
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output
if log_probs:
return F.log_softmax(logits, dim=1)
else:
return F.softmax(logits, dim=1)
def get_topk_predicted_tokens(self, net_output, src_tokens, log_probs:
'bool'):
"""
Get self.topk_labels_per_source_token top predicted words for vocab
reduction (per source token).
"""
assert isinstance(self.topk_labels_per_source_token, int
) and self.topk_labels_per_source_token > 0, 'topk_labels_per_source_token must be a positive int, or None'
k = src_tokens.size(1) * self.topk_labels_per_source_token
probs = self.get_normalized_probs(net_output, log_probs)
_, topk_indices = torch.topk(probs, k, dim=1)
return topk_indices
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
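# Hedged walk-through (my addition, non-attention branch): hidden states are
# ReLU'd, mean- and max-pooled over the time axis (dim 0), and the two pooled
# summaries are summed before the output projection.
m = Model(encoder_output_dim=4, hidden_dim=4, output_dim=4)
enc = torch.rand(5, 2, 4)  # (src_len, batch, encoder_output_dim)
logits = m((enc,))         # forward unpacks the first element of the tuple
probs = m.get_normalized_probs(logits, log_probs=False)
assert probs.shape == (2, 4)
assert torch.allclose(probs.sum(dim=1), torch.ones(2))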
|
GlobalAvgPool2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [2]), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, arg0_1, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class GlobalAvgPool2d(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super(GlobalAvgPool2d, self).__init__()
def forward(self, inputs):
in_size = inputs.size()
return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
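# Commentary (added): a persistent reduction; each of the 16 programs owns one
# (batch, channel) row of 16 spatial elements, sums it along the RBLOCK axis,
# and divides by 16.0 to produce the per-channel spatial mean in place.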
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
return buf1,
class GlobalAvgPool2dNew(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super(GlobalAvgPool2dNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Rming/Self-Correction-Human-Parsing | GlobalAvgPool2d | false | 977 | [
"MIT"
] | 0 | c2b711c0a11f3980a8bf4c7a2acf85d80732620a | https://github.com/Rming/Self-Correction-Human-Parsing/tree/c2b711c0a11f3980a8bf4c7a2acf85d80732620a | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super().__init__()
def forward(self, inputs):
in_size = inputs.size()
return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
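# Hedged equivalence check (my addition): flattening the spatial dims and
# taking the mean matches adaptive average pooling to a 1x1 output.
import torch.nn.functional as F
x = torch.rand(4, 4, 4, 4)
ref = F.adaptive_avg_pool2d(x, 1).flatten(1)
assert torch.allclose(Model()(x), ref)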
|
CustomBatchNormManualModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/up/cupno3pzyw77bw3jxggqthxk5iageb5gntveyyc7vpvocufttkij.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sum, aten.mul, aten.var, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# out => add, add_1, div, mul, mul_1, sqrt, sub, sum_1, var
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [0]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 0.25), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [0]), kwargs = {correction: 0})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mul), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0 = async_compile.triton('triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x4), xmask)
tmp2 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (64 + x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (128 + x5), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (192 + x5), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = tmp1 - tmp10
tmp12 = 4.0
tmp13 = tmp8 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp3 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp5 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp7 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = tmp11 / tmp28
tmp30 = tmp0 * tmp29
tmp32 = tmp30 + tmp31
tl.store(in_out_ptr0 + (x4), tmp32, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sum, aten.mul, aten.var, aten.sub, aten.add, aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0.run(buf1, primals_2, primals_1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf1, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CustomBatchNormManualFunction(torch.autograd.Function):
"""
This torch.autograd.Function implements a functional custom version of the batch norm operation for MLPs.
Using torch.autograd.Function allows you to write a custom backward function.
The function will be called from the nn.Module CustomBatchNormManualModule
    Inside forward, operations on the tensors are (automatically) not recorded for
    automatic differentiation, since the backward pass is implemented explicitly in
    the backward method.
    The forward pass is not called directly but via the apply() method. This makes sure
    that the context objects are dealt with correctly. Example:
        my_bn_fct = CustomBatchNormManualFunction()
        normalized = my_bn_fct.apply(input, gamma, beta, eps)
"""
@staticmethod
def forward(ctx, input, gamma, beta, eps=1e-05):
"""
Compute the batch normalization
Args:
            ctx: context object handling storage and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
input: input tensor of shape (n_batch, n_neurons)
            gamma: variance scaling tensor, applied per neuron, shape (n_neurons)
            beta: mean bias tensor, applied per neuron, shape (n_neurons)
eps: small float added to the variance for stability
Returns:
out: batch-normalized tensor
"""
batch_size = input.shape[0]
mean = 1 / batch_size * torch.sum(input, dim=0)
var = input.var(dim=0, unbiased=False)
norm = (input - mean) / torch.sqrt(var + eps)
out = gamma * norm + beta
ctx.save_for_backward(norm, gamma, var)
ctx.eps = eps
return out
@staticmethod
def backward(ctx, grad_output):
"""
Compute backward pass of the batch normalization.
Args:
            ctx: context object handling storage and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
Returns:
out: tuple containing gradients for all input arguments
"""
normalized, gamma, var = ctx.saved_tensors
eps = ctx.eps
B = grad_output.shape[0]
grad_gamma = (grad_output * normalized).sum(0)
grad_beta = torch.sum(grad_output, dim=0)
grad_input = torch.div(gamma, B * torch.sqrt(var + eps)) * (B *
grad_output - grad_beta - grad_gamma * normalized)
return grad_input, grad_gamma, grad_beta, None
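# A minimal usage sketch (added commentary; shapes are illustrative, not from the
# original source). An autograd.Function is invoked through its static apply():
#   x = torch.randn(8, 4, requires_grad=True)
#   gamma = torch.ones(4, requires_grad=True)
#   beta = torch.zeros(4, requires_grad=True)
#   out = CustomBatchNormManualFunction.apply(x, gamma, beta)
#   out.sum().backward()  # dispatches to the custom backward defined above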
class CustomBatchNormManualModule(nn.Module):
"""
    This nn.Module implements a custom version of the batch norm operation for MLPs.
In self.forward the functional version CustomBatchNormManualFunction.forward is called.
The automatic differentiation of PyTorch calls the backward method of this function in the backward pass.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormManualModule object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
"""
super(CustomBatchNormManualModule, self).__init__()
self.n_neurons = n_neurons
self.eps = eps
self.beta = nn.Parameter(torch.zeros(self.n_neurons))
self.gamma = nn.Parameter(torch.ones(self.n_neurons))
def forward(self, input):
"""
Compute the batch normalization via CustomBatchNormManualFunction
Args:
input: input tensor of shape (n_batch, n_neurons)
Returns:
out: batch-normalized tensor
"""
assert input.shape[1] == self.n_neurons
batch_norm_custom = CustomBatchNormManualFunction()
out = batch_norm_custom.apply(input, self.gamma, self.beta, self.eps)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_neurons': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x4, xmask)
tmp2 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (64 + x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (128 + x5), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (192 + x5), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = tmp1 - tmp10
tmp12 = 4.0
tmp13 = tmp8 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp3 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp5 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp7 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = tmp11 / tmp28
tmp30 = tmp0 * tmp29
tmp32 = tmp30 + tmp31
tl.store(in_out_ptr0 + x4, tmp32, xmask)
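# Added commentary (not part of the generated output): the kernel above fuses the
# whole batch-norm forward for the traced batch of 4. The four loads with stride 64
# walk the batch dimension for one feature position, tmp8 * 0.25 is the batch mean,
# the averaged squared deviations (/ 4.0) give the biased variance, and the final
# store writes scale * (x - mean) / sqrt(var + 1e-05) + shift, where the per-feature
# scale/shift (in_ptr0 / in_ptr2) broadcast along the last dimension (x0 = index % 4).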
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_div_mul_sqrt_sub_sum_var_0[grid(256)](buf1,
primals_2, primals_1, primals_3, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf1, primals_1
class CustomBatchNormManualFunction(torch.autograd.Function):
"""
This torch.autograd.Function implements a functional custom version of the batch norm operation for MLPs.
Using torch.autograd.Function allows you to write a custom backward function.
The function will be called from the nn.Module CustomBatchNormManualModule
    Inside forward, operations on the tensors are (automatically) not recorded for
    automatic differentiation, since the backward pass is implemented explicitly in
    the backward method.
    The forward pass is not called directly but via the apply() method. This makes sure
    that the context objects are dealt with correctly. Example:
        my_bn_fct = CustomBatchNormManualFunction()
        normalized = my_bn_fct.apply(input, gamma, beta, eps)
"""
@staticmethod
def forward(ctx, input, gamma, beta, eps=1e-05):
"""
Compute the batch normalization
Args:
            ctx: context object handling storage and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
input: input tensor of shape (n_batch, n_neurons)
            gamma: variance scaling tensor, applied per neuron, shape (n_neurons)
            beta: mean bias tensor, applied per neuron, shape (n_neurons)
eps: small float added to the variance for stability
Returns:
out: batch-normalized tensor
"""
batch_size = input.shape[0]
mean = 1 / batch_size * torch.sum(input, dim=0)
var = input.var(dim=0, unbiased=False)
norm = (input - mean) / torch.sqrt(var + eps)
out = gamma * norm + beta
ctx.save_for_backward(norm, gamma, var)
ctx.eps = eps
return out
@staticmethod
def backward(ctx, grad_output):
"""
Compute backward pass of the batch normalization.
Args:
            ctx: context object handling storage and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
Returns:
out: tuple containing gradients for all input arguments
"""
normalized, gamma, var = ctx.saved_tensors
eps = ctx.eps
B = grad_output.shape[0]
grad_gamma = (grad_output * normalized).sum(0)
grad_beta = torch.sum(grad_output, dim=0)
grad_input = torch.div(gamma, B * torch.sqrt(var + eps)) * (B *
grad_output - grad_beta - grad_gamma * normalized)
return grad_input, grad_gamma, grad_beta, None
class CustomBatchNormManualModuleNew(nn.Module):
"""
    This nn.Module implements a custom version of the batch norm operation for MLPs.
In self.forward the functional version CustomBatchNormManualFunction.forward is called.
The automatic differentiation of PyTorch calls the backward method of this function in the backward pass.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormManualModule object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
"""
super(CustomBatchNormManualModuleNew, self).__init__()
self.n_neurons = n_neurons
self.eps = eps
self.beta = nn.Parameter(torch.zeros(self.n_neurons))
self.gamma = nn.Parameter(torch.ones(self.n_neurons))
def forward(self, input_0):
        # In the traced graph (see the Graph fragment above), primals_2 is the
        # multiplicative term and primals_3 the additive one, so gamma and beta
        # must map as follows:
        primals_2 = self.gamma
        primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| RaymondKoopmanschap/DL_assignment_code | CustomBatchNormManualModule | false | 978 | [
"MIT"
] | 0 | 68b3290be9fbd6c55433a7585e2cfa18e0f35f5c | https://github.com/RaymondKoopmanschap/DL_assignment_code/tree/68b3290be9fbd6c55433a7585e2cfa18e0f35f5c | import torch
import torch.nn as nn
class CustomBatchNormManualFunction(torch.autograd.Function):
"""
This torch.autograd.Function implements a functional custom version of the batch norm operation for MLPs.
Using torch.autograd.Function allows you to write a custom backward function.
The function will be called from the nn.Module CustomBatchNormManualModule
    Inside forward, operations on the tensors are (automatically) not recorded for
    automatic differentiation, since the backward pass is implemented explicitly in
    the backward method.
    The forward pass is not called directly but via the apply() method. This makes sure
    that the context objects are dealt with correctly. Example:
        my_bn_fct = CustomBatchNormManualFunction()
        normalized = my_bn_fct.apply(input, gamma, beta, eps)
"""
@staticmethod
def forward(ctx, input, gamma, beta, eps=1e-05):
"""
Compute the batch normalization
Args:
            ctx: context object handling storage and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
input: input tensor of shape (n_batch, n_neurons)
            gamma: variance scaling tensor, applied per neuron, shape (n_neurons)
            beta: mean bias tensor, applied per neuron, shape (n_neurons)
eps: small float added to the variance for stability
Returns:
out: batch-normalized tensor
"""
batch_size = input.shape[0]
mean = 1 / batch_size * torch.sum(input, dim=0)
var = input.var(dim=0, unbiased=False)
norm = (input - mean) / torch.sqrt(var + eps)
out = gamma * norm + beta
ctx.save_for_backward(norm, gamma, var)
ctx.eps = eps
return out
@staticmethod
def backward(ctx, grad_output):
"""
Compute backward pass of the batch normalization.
Args:
            ctx: context object handling storage and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
Returns:
out: tuple containing gradients for all input arguments
"""
normalized, gamma, var = ctx.saved_tensors
eps = ctx.eps
B = grad_output.shape[0]
grad_gamma = (grad_output * normalized).sum(0)
grad_beta = torch.sum(grad_output, dim=0)
grad_input = torch.div(gamma, B * torch.sqrt(var + eps)) * (B *
grad_output - grad_beta - grad_gamma * normalized)
return grad_input, grad_gamma, grad_beta, None
class Model(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
In self.forward the functional version CustomBatchNormManualFunction.forward is called.
The automatic differentiation of PyTorch calls the backward method of this function in the backward pass.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormManualModule object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
"""
super().__init__()
self.n_neurons = n_neurons
self.eps = eps
self.beta = nn.Parameter(torch.zeros(self.n_neurons))
self.gamma = nn.Parameter(torch.ones(self.n_neurons))
def forward(self, input):
"""
Compute the batch normalization via CustomBatchNormManualFunction
Args:
input: input tensor of shape (n_batch, n_neurons)
Returns:
out: batch-normalized tensor
"""
assert input.shape[1] == self.n_neurons
batch_norm_custom = CustomBatchNormManualFunction()
out = batch_norm_custom.apply(input, self.gamma, self.beta, self.eps)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Encoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [h_], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h_ => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xh/cxhi2oxtldw7gsvpajvcvh4iks7iujnefav4smobuv5savmjupdj.py
# Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# std => exp
# z => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 0.5), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %randn), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %mul_1), kwargs = {})
triton_poi_fused_add_exp_mul_1 = async_compile.triton('triton_poi_fused_add_exp_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp5 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_var], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_7
# Topologically Sorted Source Nodes: [epsilon], Original ATen: [aten.randn_like]
buf4 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add]
triton_poi_fused_add_exp_mul_1.run(buf2, buf3, buf5, buf6, 256, grid=grid(256), stream=stream0)
return (buf6, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf5, primals_6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Encoder(nn.Module):
def __init__(self, input_dim, hidden_dim, latent_dim):
super(Encoder, self).__init__()
self.FC_input = nn.Linear(input_dim, hidden_dim)
self.FC_mean = nn.Linear(hidden_dim, latent_dim)
self.FC_var = nn.Linear(hidden_dim, latent_dim)
self.training = True
def forward(self, x):
h_ = torch.relu(self.FC_input(x))
mean = self.FC_mean(h_)
log_var = self.FC_var(h_)
std = torch.exp(0.5 * log_var)
z = self.reparameterization(mean, std)
return z, mean, log_var
def reparameterization(self, mean, std):
epsilon = torch.randn_like(std)
z = mean + std * epsilon
return z
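# Sketch of the reparameterization trick used above (added commentary; values are
# illustrative). Sampling z ~ N(mean, std^2) directly is not differentiable with
# respect to mean and log_var, so the randomness is factored into epsilon ~ N(0, 1):
#   mean, log_var = torch.zeros(3), torch.zeros(3)
#   z = mean + torch.exp(0.5 * log_var) * torch.randn(3)  # gradients reach mean/log_var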
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'hidden_dim': 4, 'latent_dim': 4}]
| import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
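# Added note: the kernel above fuses the linear layer's bias add with ReLU and
# also stores the (activation <= 0) mask that threshold_backward consumes when
# autograd replays the ReLU in the backward pass.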
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr2 + x0, xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tl_math.exp(tmp3)
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
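# Added note: this kernel evaluates mean + exp(0.5 * log_var) * epsilon, i.e. the
# reparameterized sample z, reading mean (in_ptr0), log_var (in_ptr1), and the
# pre-drawn standard-normal noise (in_ptr2) elementwise.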
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_7
buf4 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_exp_mul_1[grid(256)](buf2, buf3, buf5, buf6,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf6, reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf5, primals_6, primals_4, buf7
class EncoderNew(nn.Module):
def __init__(self, input_dim, hidden_dim, latent_dim):
super(EncoderNew, self).__init__()
self.FC_input = nn.Linear(input_dim, hidden_dim)
self.FC_mean = nn.Linear(hidden_dim, latent_dim)
self.FC_var = nn.Linear(hidden_dim, latent_dim)
self.training = True
def reparameterization(self, mean, std):
epsilon = torch.randn_like(std)
z = mean + std * epsilon
return z
def forward(self, input_0):
primals_1 = self.FC_input.weight
primals_2 = self.FC_input.bias
primals_4 = self.FC_mean.weight
primals_5 = self.FC_mean.bias
primals_6 = self.FC_var.weight
primals_7 = self.FC_var.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1], output[2]
| RasmusJuul/dtu_mlops | Encoder | false | 979 | [
"Apache-2.0"
] | 0 | 98bca082067aa7575bb8e8193991723d474f0850 | https://github.com/RasmusJuul/dtu_mlops/tree/98bca082067aa7575bb8e8193991723d474f0850 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_dim, hidden_dim, latent_dim):
super().__init__()
self.FC_input = nn.Linear(input_dim, hidden_dim)
self.FC_mean = nn.Linear(hidden_dim, latent_dim)
self.FC_var = nn.Linear(hidden_dim, latent_dim)
self.training = True
def forward(self, x):
h_ = torch.relu(self.FC_input(x))
mean = self.FC_mean(h_)
log_var = self.FC_var(h_)
std = torch.exp(0.5 * log_var)
z = self.reparameterization(mean, std)
return z, mean, log_var
def reparameterization(self, mean, std):
epsilon = torch.randn_like(std)
z = mean + std * epsilon
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Brightness | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/7g/c7geimtdpt65nlqqtkhbbmnvlhudacjtrygpmzvlv5owbl5vxn77.py
# Topologically Sorted Source Nodes: [add, clamp], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# add => mul
# clamp => clamp_max, clamp_min
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.8), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {})
triton_poi_fused_add_clamp_0 = async_compile.triton('triton_poi_fused_add_clamp_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.8
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 1.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, clamp], Original ATen: [aten.add, aten.clamp]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torchvision import transforms as ttf
class Brightness(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_brightness(img, self.M / 5.0)
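# Note (added commentary): for float tensors, adjust_brightness(img, f) reduces to
# clamp(img * f, 0, 1); with M=4 the factor is 4 / 5.0 = 0.8, which is why the
# compiled Triton kernel in this record multiplies by the constant 0.8 and clamps.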
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'M': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.8
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 1.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
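# Added note: the brightness factor M / 5.0 = 0.8 was baked in as a constant at
# trace time; the maximum/minimum pair implements clamp(img * 0.8, 0.0, 1.0),
# matching torchvision's adjust_brightness on float inputs.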
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_clamp_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class BrightnessNew(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Hayoung93/UDA | Brightness | false | 980 | [
"Apache-2.0"
] | 0 | a587b01c76141d64e7cead55b62e0f3ed75890bf | https://github.com/Hayoung93/UDA/tree/a587b01c76141d64e7cead55b62e0f3ed75890bf | import torch
import torch.nn as nn
from torchvision import transforms as ttf
class Model(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_brightness(img, self.M / 5.0)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
RegKappa | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ix/cixmn7clw6nfzwggunlhdyodrndbmn7qm42pty2mehlyh62xq5vu.py
# Topologically Sorted Source Nodes: [mul, sum_1, num, norm, norm_1, denom, add_1, kappa, sub], Original ATen: [aten.mul, aten.sum, aten.linalg_vector_norm, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# add_1 => add_1
# denom => add
# kappa => div
# mul => mul
# norm => pow_1, pow_2, sum_2
# norm_1 => pow_3, pow_4, sum_3
# num => mul_1
# sub => sub
# sum_1 => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, None), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_3, 0.5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, %pow_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-07), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
triton_per_fused_add_div_linalg_vector_norm_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_linalg_vector_norm_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_linalg_vector_norm_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tmp0 * tmp0
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp1 * tmp1
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 2.0
tmp15 = tmp5 * tmp14
tmp16 = libdevice.sqrt(tmp9)
tmp17 = libdevice.sqrt(tmp13)
tmp18 = tmp16 + tmp17
tmp19 = 1e-07
tmp20 = tmp18 + tmp19
tmp21 = tmp15 / tmp20
tmp22 = 1.0
tmp23 = tmp22 - tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, sum_1, num, norm, norm_1, denom, add_1, kappa, sub], Original ATen: [aten.mul, aten.sum, aten.linalg_vector_norm, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_linalg_vector_norm_mul_rsub_sum_0.run(buf3, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn.modules.loss import _Loss
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class RegKappa(_Loss):
def __init__(self, ignore_index=None):
super(RegKappa, self).__init__()
self.min = min
self.max = max
self.ignore_index = ignore_index
def forward(self, input, target):
if self.ignore_index is not None:
mask = target != self.ignore_index
target = target[mask]
input = input[mask]
target = target.float()
num = 2 * torch.sum(input * target)
denom = input.norm(2) + target.norm(2)
eps = 1e-07
kappa = num / (denom + eps)
return 1.0 - kappa
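# Worked example (illustrative, not from the original source): with input == target
# and ||target||_2 == 1, num = 2 * 1 = 2 and denom = 1 + 1 = 2, so kappa ~= 1 and the
# loss approaches 0; for orthogonal input/target the dot product is 0, kappa = 0, and
# the loss is 1. The eps = 1e-07 term only guards against a zero denominator.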
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn.modules.loss import _Loss
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_linalg_vector_norm_mul_rsub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tmp0 * tmp0
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp1 * tmp1
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 2.0
tmp15 = tmp5 * tmp14
tmp16 = libdevice.sqrt(tmp9)
tmp17 = libdevice.sqrt(tmp13)
tmp18 = tmp16 + tmp17
tmp19 = 1e-07
tmp20 = tmp18 + tmp19
tmp21 = tmp15 / tmp20
tmp22 = 1.0
tmp23 = tmp22 - tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
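# Added note: a single program (RBLOCK=256) reduces all elements at once,
# accumulating the dot product sum(x * y) and both squared L2 norms in one pass,
# then stores the scalar 1 - 2 * dot / (||x|| + ||y|| + 1e-07) into in_out_ptr0.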
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_linalg_vector_norm_mul_rsub_sum_0[grid(1)](
buf3, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class RegKappaNew(_Loss):
def __init__(self, ignore_index=None):
super(RegKappaNew, self).__init__()
self.min = min
self.max = max
self.ignore_index = ignore_index
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Ramstein/Retinopathy2 | RegKappa | false | 981 | [
"MIT"
] | 0 | 669e74206c466e6351d4e3df6087c6aa39b5c6c2 | https://github.com/Ramstein/Retinopathy2/tree/669e74206c466e6351d4e3df6087c6aa39b5c6c2 | import torch
from torch.nn.modules.loss import _Loss
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class Model(_Loss):
def __init__(self, ignore_index=None):
super().__init__()
self.min = min
self.max = max
self.ignore_index = ignore_index
def forward(self, input, target):
if self.ignore_index is not None:
mask = target != self.ignore_index
target = target[mask]
input = input[mask]
target = target.float()
num = 2 * torch.sum(input * target)
denom = input.norm(2) + target.norm(2)
eps = 1e-07
kappa = num / (denom + eps)
return 1.0 - kappa
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ScaledL2Norm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/2a/c2ardebf3dijah3sug56cdg3pdrqptfnjdozbx2wvqruxlwmuixz.py
# Topologically Sorted Source Nodes: [normalize, mul], Original ATen: [aten.div, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# normalize => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %unsqueeze_2), kwargs = {})
triton_poi_fused_div_mul_0 = async_compile.triton('triton_poi_fused_div_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [normalize, mul], Original ATen: [aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_div_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledL2Norm(nn.Module):
def __init__(self, in_channels, initial_scale):
super(ScaledL2Norm, self).__init__()
self.in_channels = in_channels
self.scale = nn.Parameter(torch.Tensor(in_channels))
self.initial_scale = initial_scale
self.reset_parameters()
def forward(self, x):
return F.normalize(x, p=2, dim=1) * self.scale.unsqueeze(0).unsqueeze(2
).unsqueeze(3)
def reset_parameters(self):
self.scale.data.fill_(self.initial_scale)
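# Usage sketch (added commentary; shapes are illustrative). Each spatial position's
# channel vector is rescaled to unit L2 norm and then multiplied by a learnable
# per-channel scale, in the style of the L2Norm layers used in ParseNet/SSD:
#   layer = ScaledL2Norm(in_channels=4, initial_scale=1.0)
#   y = layer(torch.rand(2, 4, 8, 8))  # normalizes over dim=1, then scales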
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'initial_scale': 1.0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_0[grid(256)](primals_1, primals_2, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class ScaledL2NormNew(nn.Module):
def __init__(self, in_channels, initial_scale):
super(ScaledL2NormNew, self).__init__()
self.in_channels = in_channels
self.scale = nn.Parameter(torch.Tensor(in_channels))
self.initial_scale = initial_scale
self.reset_parameters()
def reset_parameters(self):
self.scale.data.fill_(self.initial_scale)
def forward(self, input_0):
primals_2 = self.scale
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
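# Hedged parity sketch (added; assumes a CUDA device, since call() launches a
# Triton kernel): the compiled module should match the eager formulation.
def _scaled_l2_parity_check():
    import torch.nn.functional as F
    if not torch.cuda.is_available():
        return
    x = torch.rand(4, 4, 4, 4, device='cuda')
    m = ScaledL2NormNew(4, 1.0).cuda()
    eager = F.normalize(x, p=2, dim=1) * m.scale.view(1, -1, 1, 1)
    assert torch.allclose(m(x), eager, atol=1e-6)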
| Richard-cpu2333/tx2dl | ScaledL2Norm | false | 982 | [
"Apache-2.0"
] | 0 | 985d9f9f24004271e85745a49252ab9922aec655 | https://github.com/Richard-cpu2333/tx2dl/tree/985d9f9f24004271e85745a49252ab9922aec655 | import torch
import torch.onnx
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_channels, initial_scale):
super().__init__()
self.in_channels = in_channels
self.scale = nn.Parameter(torch.Tensor(in_channels))
self.initial_scale = initial_scale
self.reset_parameters()
def forward(self, x):
return F.normalize(x, p=2, dim=1) * self.scale.unsqueeze(0).unsqueeze(2
).unsqueeze(3)
def reset_parameters(self):
self.scale.data.fill_(self.initial_scale)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 1.0]
|
CosineLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/zk/czk5xfokmwnuegxn53eciq25366p2is3a6lxx47tlosf3q225vha.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.div]
# Source node to ATen node mapping:
# x => div
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xe/cxewggzrfqe57dzglxrzfhfgpsywlh36utvtdulp5oi75wfs7ml3.py
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.div]
# Source node to ATen node mapping:
# w => div_1
# Graph fragment:
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %expand_1), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.div]
triton_poi_fused_div_1.run(primals_2, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cos_theta], Original ATen: [aten.mm]
extern_kernels.mm(buf0, buf1, out=buf2)
return (buf1, buf2, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
class CosineLinear(nn.Module):
def __init__(self, in_features, out_features):
super(CosineLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(in_features, out_features))
nn.init.xavier_uniform_(self.weight)
def forward(self, input):
x = F.normalize(input, dim=-1)
w = F.normalize(self.weight, dim=0)
cos_theta = x.mm(w)
return w, cos_theta
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
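# Hedged illustration (added): with the rows of x and the columns of w
# unit-normalized, every entry of the matmul is a cosine similarity and is
# therefore bounded by 1 in magnitude.
def _cosine_range_check():
    x = torch.rand(4, 4)
    w = torch.rand(4, 4)
    cos_theta = F.normalize(x, dim=-1).mm(F.normalize(w, dim=0))
    assert float(cos_theta.abs().max()) <= 1.0 + 1e-6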
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, buf1, out=buf2)
return buf1, buf2, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0)
class CosineLinearNew(nn.Module):
def __init__(self, in_features, out_features):
super(CosineLinearNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(in_features, out_features))
nn.init.xavier_uniform_(self.weight)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0], output[1]
| QingquanBao/Spear-Shield | CosineLinear | false | 983 | [
"Apache-2.0"
] | 0 | d57b8f4412c3d651b6f7e056c9c45cfd0dc950c3 | https://github.com/QingquanBao/Spear-Shield/tree/d57b8f4412c3d651b6f7e056c9c45cfd0dc950c3 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
class Model(nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(in_features, out_features))
nn.init.xavier_uniform_(self.weight)
def forward(self, input):
x = F.normalize(input, dim=-1)
w = F.normalize(self.weight, dim=0)
cos_theta = x.mm(w)
return w, cos_theta
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
MultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/yd/cydbtjoq352gcolmflbvu2nqkda7xg7q5hnvltb47jsg5dbmubym.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/s2/cs2rk3o3kmhydx4oijp6rsdb5atcrq5axy4adadrpl7gkt7scies.py
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# p_attn => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# p_attn => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [p_attn], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf9, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf11)
return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), buf7, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
def combine_heads(X):
"""
Combine heads (the inverse of split heads):
1) Transpose X from (batch size, nheads, sequence length, d_head) to
(batch size, sequence length, nheads, d_head)
2) Combine (reshape) last 2 dimensions (nheads, d_head) into 1 (d_model)
Inputs:
      X : [batch size, nheads, sequence length, d_head]
          (nheads and d_head are read from X's last two dimensions)
Outputs:
[batch_size, seq_len, d_model]
"""
X = X.transpose(1, 2)
nheads, d_head = X.shape[-2:]
return X.contiguous().view(list(X.shape[:-2]) + [nheads * d_head])
def create_src_lengths_mask(batch_size, src_lengths):
max_srclen = src_lengths.max()
src_indices = torch.arange(0, max_srclen).unsqueeze(0).type_as(src_lengths)
src_indices = src_indices.expand(batch_size, max_srclen)
src_lengths = src_lengths.unsqueeze(dim=1).expand(batch_size, max_srclen)
return (src_indices < src_lengths).int().detach()
def apply_masks(scores, batch_size, unseen_mask, src_lengths):
seq_len = scores.shape[-1]
sequence_mask = torch.ones(seq_len, seq_len).unsqueeze(0).int()
if unseen_mask:
sequence_mask = torch.tril(torch.ones(seq_len, seq_len), diagonal=0
).unsqueeze(0).int()
if src_lengths is not None:
src_lengths_mask = create_src_lengths_mask(batch_size=batch_size,
src_lengths=src_lengths).unsqueeze(-2)
sequence_mask = sequence_mask & src_lengths_mask
sequence_mask = sequence_mask.unsqueeze(1)
scores = scores.masked_fill(sequence_mask == 0, -np.inf)
return scores
def scaled_dot_prod_attn(query, key, value, unseen_mask=False, src_lengths=None
):
"""
Scaled Dot Product Attention
Implements equation:
Attention(Q, K, V) = softmax(QK^T/\\sqrt{d_k})V
Inputs:
query : [batch size, nheads, sequence length, d_k]
key : [batch size, nheads, sequence length, d_k]
value : [batch size, nheads, sequence length, d_v]
unseen_mask: if True, only attend to previous sequence positions
      src_lengths : if not None, mask padding positions beyond each source length
    Outputs:
      output : [batch size, nheads, sequence length, d_v]
      p_attn : [batch size, nheads, sequence length, sequence length]
Note that in this implementation d_q = d_k = d_v = dim
"""
d_k = query.shape[-1]
scores = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(d_k)
if unseen_mask or src_lengths is not None:
scores = apply_masks(scores=scores, batch_size=query.shape[0],
unseen_mask=unseen_mask, src_lengths=src_lengths)
p_attn = F.softmax(scores, dim=-1)
return torch.matmul(p_attn, value), p_attn
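# Hedged sanity sketch (added for illustration; not from the original repo):
# the attention weights along the last dim form a probability distribution,
# and the output keeps the value tensor's shape.
def _attn_sanity_check():
    q = k = v = torch.rand(2, 4, 5, 8)  # [batch, nheads, seq, d_head]
    out, p = scaled_dot_prod_attn(q, k, v)
    assert out.shape == v.shape
    assert torch.allclose(p.sum(dim=-1), torch.ones(2, 4, 5))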
def split_heads(X, nheads):
"""
Split heads:
1) Split (reshape) last dimension (size d_model) into nheads, d_head
2) Transpose X from (batch size, sequence length, nheads, d_head) to
(batch size, nheads, sequence length, d_head)
Inputs:
X : [batch size, sequence length, nheads * d_head]
nheads : integer
Outputs:
[batch size, nheads, sequence length, d_head]
"""
last_dim = X.shape[-1]
assert last_dim % nheads == 0
X_last_dim_split = X.view(list(X.shape[:-1]) + [nheads, last_dim // nheads]
)
return X_last_dim_split.transpose(1, 2)
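# Hedged round-trip sketch (added): split_heads followed by combine_heads
# restores the original [batch, seq, d_model] tensor exactly, since both are
# pure reshapes/transposes.
def _heads_roundtrip_check():
    x = torch.rand(2, 5, 8)
    h = split_heads(x, nheads=4)  # -> [2, 4, 5, 2]
    assert h.shape == (2, 4, 5, 2)
    assert torch.equal(combine_heads(h), x)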
class MultiheadAttention(nn.Module):
"""
Multiheaded Scaled Dot Product Attention
Implements equation:
MultiHead(Q, K, V) = Concat(head_1,...,head_h)W^O
where head_i = Attention(QW_i^Q, KW_i^K, VW_i^V)
Similarly to the above, d_k = d_v = d_model / h
Inputs
init:
nheads : integer # of attention heads
d_model : model dimensionality
        d_head : dimensionality of a single head (derived as d_model // nheads, not passed in)
forward:
query : [batch size, sequence length, d_model]
key: [batch size, sequence length, d_model]
value: [batch size, sequence length, d_model]
unseen_mask: if True, only attend to previous sequence positions
        src_lengths : if not None, mask padding based on these source lengths
Output
result : [batch_size, sequence length, d_model]
"""
def __init__(self, nheads, d_model):
"""Take in model size and number of heads."""
super(MultiheadAttention, self).__init__()
assert d_model % nheads == 0
self.d_head = d_model // nheads
self.nheads = nheads
self.Q_fc = nn.Linear(d_model, d_model, bias=False)
self.K_fc = nn.Linear(d_model, d_model, bias=False)
self.V_fc = nn.Linear(d_model, d_model, bias=False)
self.output_fc = nn.Linear(d_model, d_model, bias=False)
self.attn = None
def forward(self, query, key, value, unseen_mask=False, src_lengths=None):
query = split_heads(self.Q_fc(query), self.nheads)
key = split_heads(self.K_fc(key), self.nheads)
value = split_heads(self.V_fc(value), self.nheads)
x, self.attn = scaled_dot_prod_attn(query=query, key=key, value=
value, unseen_mask=unseen_mask, src_lengths=src_lengths)
x = combine_heads(x)
return self.output_fc(x)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'nheads': 4, 'd_model': 4}]
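# Hedged usage sketch (added): an end-to-end shape check of the module with
# self-attention inputs.
def _mha_shape_check():
    m = MultiheadAttention(nheads=4, d_model=8)
    x = torch.rand(2, 5, 8)
    assert m(x, x, x).shape == (2, 5, 8)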
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tmp16 = tl_math.exp(tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, buf3, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, buf4, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf6
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 4)](buf2, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_0[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.mm(reinterpret_tensor(buf10, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf11)
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), buf7, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_4, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0
), primals_7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0)
def combine_heads(X):
"""
Combine heads (the inverse of split heads):
1) Transpose X from (batch size, nheads, sequence length, d_head) to
(batch size, sequence length, nheads, d_head)
2) Combine (reshape) last 2 dimensions (nheads, d_head) into 1 (d_model)
Inputs:
      X : [batch size, nheads, sequence length, d_head]
          (nheads and d_head are read from X's last two dimensions)
Outputs:
[batch_size, seq_len, d_model]
"""
X = X.transpose(1, 2)
nheads, d_head = X.shape[-2:]
return X.contiguous().view(list(X.shape[:-2]) + [nheads * d_head])
def create_src_lengths_mask(batch_size, src_lengths):
max_srclen = src_lengths.max()
src_indices = torch.arange(0, max_srclen).unsqueeze(0).type_as(src_lengths)
src_indices = src_indices.expand(batch_size, max_srclen)
src_lengths = src_lengths.unsqueeze(dim=1).expand(batch_size, max_srclen)
return (src_indices < src_lengths).int().detach()
def apply_masks(scores, batch_size, unseen_mask, src_lengths):
seq_len = scores.shape[-1]
sequence_mask = torch.ones(seq_len, seq_len).unsqueeze(0).int()
if unseen_mask:
sequence_mask = torch.tril(torch.ones(seq_len, seq_len), diagonal=0
).unsqueeze(0).int()
if src_lengths is not None:
src_lengths_mask = create_src_lengths_mask(batch_size=batch_size,
src_lengths=src_lengths).unsqueeze(-2)
sequence_mask = sequence_mask & src_lengths_mask
sequence_mask = sequence_mask.unsqueeze(1)
scores = scores.masked_fill(sequence_mask == 0, -np.inf)
return scores
def scaled_dot_prod_attn(query, key, value, unseen_mask=False, src_lengths=None
):
"""
Scaled Dot Product Attention
Implements equation:
Attention(Q, K, V) = softmax(QK^T/\\sqrt{d_k})V
Inputs:
query : [batch size, nheads, sequence length, d_k]
key : [batch size, nheads, sequence length, d_k]
value : [batch size, nheads, sequence length, d_v]
unseen_mask: if True, only attend to previous sequence positions
      src_lengths : if not None, mask padding positions beyond each source length
    Outputs:
      output : [batch size, nheads, sequence length, d_v]
      p_attn : [batch size, nheads, sequence length, sequence length]
Note that in this implementation d_q = d_k = d_v = dim
"""
d_k = query.shape[-1]
scores = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(d_k)
if unseen_mask or src_lengths is not None:
scores = apply_masks(scores=scores, batch_size=query.shape[0],
unseen_mask=unseen_mask, src_lengths=src_lengths)
p_attn = F.softmax(scores, dim=-1)
return torch.matmul(p_attn, value), p_attn
def split_heads(X, nheads):
"""
Split heads:
1) Split (reshape) last dimension (size d_model) into nheads, d_head
2) Transpose X from (batch size, sequence length, nheads, d_head) to
(batch size, nheads, sequence length, d_head)
Inputs:
X : [batch size, sequence length, nheads * d_head]
nheads : integer
Outputs:
[batch size, nheads, sequence length, d_head]
"""
last_dim = X.shape[-1]
assert last_dim % nheads == 0
X_last_dim_split = X.view(list(X.shape[:-1]) + [nheads, last_dim // nheads]
)
return X_last_dim_split.transpose(1, 2)
class MultiheadAttentionNew(nn.Module):
"""
Multiheaded Scaled Dot Product Attention
Implements equation:
MultiHead(Q, K, V) = Concat(head_1,...,head_h)W^O
where head_i = Attention(QW_i^Q, KW_i^K, VW_i^V)
Similarly to the above, d_k = d_v = d_model / h
Inputs
init:
nheads : integer # of attention heads
d_model : model dimensionality
        d_head : dimensionality of a single head (derived as d_model // nheads, not passed in)
forward:
query : [batch size, sequence length, d_model]
key: [batch size, sequence length, d_model]
value: [batch size, sequence length, d_model]
unseen_mask: if True, only attend to previous sequence positions
        src_lengths : if not None, mask padding based on these source lengths
Output
result : [batch_size, sequence length, d_model]
"""
def __init__(self, nheads, d_model):
"""Take in model size and number of heads."""
super(MultiheadAttentionNew, self).__init__()
assert d_model % nheads == 0
self.d_head = d_model // nheads
self.nheads = nheads
self.Q_fc = nn.Linear(d_model, d_model, bias=False)
self.K_fc = nn.Linear(d_model, d_model, bias=False)
self.V_fc = nn.Linear(d_model, d_model, bias=False)
self.output_fc = nn.Linear(d_model, d_model, bias=False)
self.attn = None
def forward(self, input_0, input_1, input_2):
primals_1 = self.Q_fc.weight
primals_3 = self.K_fc.weight
primals_5 = self.V_fc.weight
primals_7 = self.output_fc.weight
primals_2 = input_0
primals_4 = input_1
primals_6 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| ROCmSoftwarePlatform/translate | MultiheadAttention | false | 984 | [
"BSD-3-Clause"
] | 0 | 32a6380d914ebe1a6c38c4992aac9600ed3d9810 | https://github.com/ROCmSoftwarePlatform/translate/tree/32a6380d914ebe1a6c38c4992aac9600ed3d9810 | import math
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch.jit
import torch.jit.quantized
import torch.onnx.operators
def combine_heads(X):
"""
Combine heads (the inverse of split heads):
1) Transpose X from (batch size, nheads, sequence length, d_head) to
(batch size, sequence length, nheads, d_head)
2) Combine (reshape) last 2 dimensions (nheads, d_head) into 1 (d_model)
Inputs:
      X : [batch size, nheads, sequence length, d_head]
          (nheads and d_head are read from X's last two dimensions)
Outputs:
[batch_size, seq_len, d_model]
"""
X = X.transpose(1, 2)
nheads, d_head = X.shape[-2:]
return X.contiguous().view(list(X.shape[:-2]) + [nheads * d_head])
def create_src_lengths_mask(batch_size, src_lengths):
max_srclen = src_lengths.max()
src_indices = torch.arange(0, max_srclen).unsqueeze(0).type_as(src_lengths)
src_indices = src_indices.expand(batch_size, max_srclen)
src_lengths = src_lengths.unsqueeze(dim=1).expand(batch_size, max_srclen)
return (src_indices < src_lengths).int().detach()
def apply_masks(scores, batch_size, unseen_mask, src_lengths):
seq_len = scores.shape[-1]
sequence_mask = torch.ones(seq_len, seq_len).unsqueeze(0).int()
if unseen_mask:
sequence_mask = torch.tril(torch.ones(seq_len, seq_len), diagonal=0
).unsqueeze(0).int()
if src_lengths is not None:
src_lengths_mask = create_src_lengths_mask(batch_size=batch_size,
src_lengths=src_lengths).unsqueeze(-2)
sequence_mask = sequence_mask & src_lengths_mask
sequence_mask = sequence_mask.unsqueeze(1)
scores = scores.masked_fill(sequence_mask == 0, -np.inf)
return scores
def scaled_dot_prod_attn(query, key, value, unseen_mask=False, src_lengths=None
):
"""
Scaled Dot Product Attention
Implements equation:
Attention(Q, K, V) = softmax(QK^T/\\sqrt{d_k})V
Inputs:
query : [batch size, nheads, sequence length, d_k]
key : [batch size, nheads, sequence length, d_k]
value : [batch size, nheads, sequence length, d_v]
unseen_mask: if True, only attend to previous sequence positions
      src_lengths : if not None, mask padding positions beyond each source length
    Outputs:
      output : [batch size, nheads, sequence length, d_v]
      p_attn : [batch size, nheads, sequence length, sequence length]
Note that in this implementation d_q = d_k = d_v = dim
"""
d_k = query.shape[-1]
scores = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(d_k)
if unseen_mask or src_lengths is not None:
scores = apply_masks(scores=scores, batch_size=query.shape[0],
unseen_mask=unseen_mask, src_lengths=src_lengths)
p_attn = F.softmax(scores, dim=-1)
return torch.matmul(p_attn, value), p_attn
def split_heads(X, nheads):
"""
Split heads:
1) Split (reshape) last dimension (size d_model) into nheads, d_head
2) Transpose X from (batch size, sequence length, nheads, d_head) to
(batch size, nheads, sequence length, d_head)
Inputs:
X : [batch size, sequence length, nheads * d_head]
nheads : integer
Outputs:
[batch size, nheads, sequence length, d_head]
"""
last_dim = X.shape[-1]
assert last_dim % nheads == 0
X_last_dim_split = X.view(list(X.shape[:-1]) + [nheads, last_dim // nheads]
)
return X_last_dim_split.transpose(1, 2)
class Model(nn.Module):
"""
Multiheaded Scaled Dot Product Attention
Implements equation:
MultiHead(Q, K, V) = Concat(head_1,...,head_h)W^O
where head_i = Attention(QW_i^Q, KW_i^K, VW_i^V)
Similarly to the above, d_k = d_v = d_model / h
Inputs
init:
nheads : integer # of attention heads
d_model : model dimensionality
        d_head : dimensionality of a single head (derived as d_model // nheads, not passed in)
forward:
query : [batch size, sequence length, d_model]
key: [batch size, sequence length, d_model]
value: [batch size, sequence length, d_model]
# ... truncated (>4000 chars) for memory efficiency |
LogisticCumulativeLink | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/cy/ccy7yru2zdy7ixr5e4xphx2c7ryepqm2xt3yzpntkputvlz2y3ua.py
# Topologically Sorted Source Nodes: [getitem_2], Original ATen: [aten.lift_fresh]
# Source node to ATen node mapping:
# getitem_2 => full_default
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([1], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_lift_fresh_0 = async_compile.triton('triton_poi_fused_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {1: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=(1,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_lift_fresh_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.full([1], 0, tl.int64)
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ge/cgeru34cvt6nnywd3qmie5evkuyzszcwjdamafkyhj6upvphaydq.py
# Topologically Sorted Source Nodes: [getitem_3], Original ATen: [aten.lift_fresh]
# Source node to ATen node mapping:
# getitem_3 => full_default_1
# Graph fragment:
# %full_default_1 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([1], -1), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_lift_fresh_1 = async_compile.triton('triton_poi_fused_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {1: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=(1,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_lift_fresh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_lift_fresh_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.full([1], -1, tl.int64)
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ub/cubbhho3d4olluhydb6hq6avsqsjszp5c44cbvmmno7salnl32jt.py
# Topologically Sorted Source Nodes: [link_mat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# link_mat_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%index, %sub_1, %sub_2], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 12) % 5
x0 = xindex % 3
x3 = (xindex // 60)
x4 = xindex % 12
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x4 + (48*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tl.sigmoid(tmp7)
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 4, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x0), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (12 + x4 + (12*((-1) + x2)) + (48*x3)), tmp14 & xmask, other=0.0)
tmp17 = tmp15 - tmp16
tmp18 = tl.sigmoid(tmp17)
tmp19 = tl.load(in_ptr1 + (x4 + (12*((-1) + x2)) + (48*x3)), tmp14 & xmask, other=0.0)
tmp20 = tmp15 - tmp19
tmp21 = tl.sigmoid(tmp20)
tmp22 = tmp18 - tmp21
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp14, tmp22, tmp23)
tmp25 = tmp0 >= tmp12
tmp26 = tl.full([1], 5, tl.int64)
tmp27 = tmp0 < tmp26
tmp28 = tl.load(in_ptr0 + (x0), tmp25 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr1 + (36 + x4 + (48*x3)), tmp25 & xmask, eviction_policy='evict_last', other=0.0)
tmp30 = tmp28 - tmp29
tmp31 = tl.sigmoid(tmp30)
tmp32 = 1.0
tmp33 = tmp32 - tmp31
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp25, tmp33, tmp34)
tmp36 = tl.where(tmp14, tmp24, tmp35)
tmp37 = tl.where(tmp4, tmp10, tmp36)
tl.store(out_ptr0 + (x5), tmp37, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (3, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 3), (48, 12, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [getitem_2], Original ATen: [aten.lift_fresh]
stream0 = get_raw_stream(0)
triton_poi_fused_lift_fresh_0.run(buf0, 1, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((1, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [getitem_3], Original ATen: [aten.lift_fresh]
triton_poi_fused_lift_fresh_1.run(buf1, 1, grid=grid(1), stream=stream0)
buf2 = empty_strided_cuda((4, 5, 4, 3), (60, 12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [link_mat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_1, primals_2, buf2, 240, grid=grid(240), stream=stream0)
return (buf2, primals_1, primals_2, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 3), (48, 12, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class LogisticCumulativeLink(nn.Module):
"""
Converts a single number to the proportional odds of belonging to a class.
Parameters
----------
num_classes : int
Number of ordered classes to partition the odds into.
init_cutpoints : str (default='ordered')
How to initialize the cutpoints of the model. Valid values are
- ordered : cutpoints are initialized to halfway between each class.
- random : cutpoints are initialized with random values.
"""
    def __init__(self, num_classes: 'int',
                 init_cutpoints: 'str' = 'ordered') -> None:
assert num_classes > 2, 'Only use this model if you have 3 or more classes'
super().__init__()
self.num_classes = num_classes
self.init_cutpoints = init_cutpoints
if init_cutpoints == 'ordered':
num_cutpoints = self.num_classes - 1
cutpoints = torch.arange(num_cutpoints).float() - num_cutpoints / 2
self.cutpoints = nn.Parameter(cutpoints)
elif init_cutpoints == 'random':
cutpoints = torch.rand(self.num_classes - 1).sort()[0]
self.cutpoints = nn.Parameter(cutpoints)
else:
raise ValueError(
f'{init_cutpoints} is not a valid init_cutpoints type')
    def forward(self, X: 'torch.Tensor') -> torch.Tensor:
        """
        Equation (11) from
        "On the consistency of ordinal regression methods", Pedregosa et al.
"""
sigmoids = torch.sigmoid(self.cutpoints - X)
link_mat = sigmoids[:, 1:] - sigmoids[:, :-1]
        link_mat = torch.cat(
            (sigmoids[:, [0]], link_mat, 1 - sigmoids[:, [-1]]), dim=1)
return link_mat
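# Illustrative sanity check (an added sketch; assumes the typical 2-D input of
# shape [batch, 1] rather than the 4-D test tensor below). The cumulative
# terms telescope:
#   P(y=0)   = sigmoid(c_0 - X)
#   P(y=k)   = sigmoid(c_k - X) - sigmoid(c_{k-1} - X)   for 0 < k < K-1
#   P(y=K-1) = 1 - sigmoid(c_{K-2} - X)
# so every row of link_mat is a probability vector over the ordered classes.
def _check_link_mat_rows_sum_to_one():
    link = LogisticCumulativeLink(num_classes=4)
    probs = link(torch.randn(8, 1))  # shape [8, 4]
    assert torch.allclose(probs.sum(dim=1), torch.ones(8), atol=1e-6)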
def get_inputs():
return [torch.rand([4, 4, 4, 3])]
def get_init_inputs():
return [[], {'num_classes': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_lift_fresh_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
tmp0 = tl.full([1], 0, tl.int64)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp0, None)
@triton.jit
def triton_poi_fused_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
tmp0 = tl.full([1], -1, tl.int64)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp0, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 12 % 5
x0 = xindex % 3
x3 = xindex // 60
x4 = xindex % 12
x5 = xindex
tmp0 = x2
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.load(in_ptr1 + (x4 + 48 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tl.sigmoid(tmp7)
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 4, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + x0, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (12 + x4 + 12 * (-1 + x2) + 48 * x3), tmp14 &
xmask, other=0.0)
tmp17 = tmp15 - tmp16
tmp18 = tl.sigmoid(tmp17)
tmp19 = tl.load(in_ptr1 + (x4 + 12 * (-1 + x2) + 48 * x3), tmp14 &
xmask, other=0.0)
tmp20 = tmp15 - tmp19
tmp21 = tl.sigmoid(tmp20)
tmp22 = tmp18 - tmp21
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp14, tmp22, tmp23)
tmp25 = tmp0 >= tmp12
tmp28 = tl.load(in_ptr0 + x0, tmp25 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp29 = tl.load(in_ptr1 + (36 + x4 + 48 * x3), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp30 = tmp28 - tmp29
tmp31 = tl.sigmoid(tmp30)
tmp32 = 1.0
tmp33 = tmp32 - tmp31
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp25, tmp33, tmp34)
tmp36 = tl.where(tmp14, tmp24, tmp35)
tmp37 = tl.where(tmp4, tmp10, tmp36)
tl.store(out_ptr0 + x5, tmp37, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (3,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 3), (48, 12, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused_lift_fresh_0[grid(1)](buf0, 1, XBLOCK=1, num_warps
=1, num_stages=1)
buf1 = empty_strided_cuda((1,), (1,), torch.int64)
triton_poi_fused_lift_fresh_1[grid(1)](buf1, 1, XBLOCK=1, num_warps
=1, num_stages=1)
buf2 = empty_strided_cuda((4, 5, 4, 3), (60, 12, 3, 1), torch.float32)
triton_poi_fused_cat_2[grid(240)](primals_1, primals_2, buf2, 240,
XBLOCK=128, num_warps=4, num_stages=1)
return buf2, primals_1, primals_2, buf0, buf1
class LogisticCumulativeLinkNew(nn.Module):
"""
Converts a single number to the proportional odds of belonging to a class.
Parameters
----------
num_classes : int
Number of ordered classes to partition the odds into.
init_cutpoints : str (default='ordered')
How to initialize the cutpoints of the model. Valid values are
- ordered : cutpoints are initialized to halfway between each class.
- random : cutpoints are initialized with random values.
"""
    def __init__(self, num_classes: 'int',
                 init_cutpoints: 'str' = 'ordered') -> None:
assert num_classes > 2, 'Only use this model if you have 3 or more classes'
super().__init__()
self.num_classes = num_classes
self.init_cutpoints = init_cutpoints
if init_cutpoints == 'ordered':
num_cutpoints = self.num_classes - 1
cutpoints = torch.arange(num_cutpoints).float() - num_cutpoints / 2
self.cutpoints = nn.Parameter(cutpoints)
elif init_cutpoints == 'random':
cutpoints = torch.rand(self.num_classes - 1).sort()[0]
self.cutpoints = nn.Parameter(cutpoints)
else:
raise ValueError(
f'{init_cutpoints} is not a valid init_cutpoints type')
def forward(self, input_0):
primals_1 = self.cutpoints
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| Ramstein/Retinopathy2 | LogisticCumulativeLink | false | 985 | [
"MIT"
] | 0 | 669e74206c466e6351d4e3df6087c6aa39b5c6c2 | https://github.com/Ramstein/Retinopathy2/tree/669e74206c466e6351d4e3df6087c6aa39b5c6c2 | import torch
from torch import nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class Model(nn.Module):
"""
Converts a single number to the proportional odds of belonging to a class.
Parameters
----------
num_classes : int
Number of ordered classes to partition the odds into.
init_cutpoints : str (default='ordered')
How to initialize the cutpoints of the model. Valid values are
- ordered : cutpoints are initialized to halfway between each class.
- random : cutpoints are initialized with random values.
"""
    def __init__(self, num_classes: 'int',
                 init_cutpoints: 'str' = 'ordered') -> None:
assert num_classes > 2, 'Only use this model if you have 3 or more classes'
super().__init__()
self.num_classes = num_classes
self.init_cutpoints = init_cutpoints
if init_cutpoints == 'ordered':
num_cutpoints = self.num_classes - 1
cutpoints = torch.arange(num_cutpoints).float() - num_cutpoints / 2
self.cutpoints = nn.Parameter(cutpoints)
elif init_cutpoints == 'random':
cutpoints = torch.rand(self.num_classes - 1).sort()[0]
self.cutpoints = nn.Parameter(cutpoints)
else:
raise ValueError(
f'{init_cutpoints} is not a valid init_cutpoints type')
    def forward(self, X: 'torch.Tensor') -> torch.Tensor:
        """
        Equation (11) from
        "On the consistency of ordinal regression methods", Pedregosa et al.
"""
sigmoids = torch.sigmoid(self.cutpoints - X)
link_mat = sigmoids[:, 1:] - sigmoids[:, :-1]
        link_mat = torch.cat(
            (sigmoids[:, [0]], link_mat, 1 - sigmoids[:, [-1]]), dim=1)
return link_mat
def get_inputs():
return [torch.rand([4, 4, 4, 3])]
def get_init_inputs():
return [4]
|
Beta | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/vl/cvlekk7mg6q4mdm2ff42mapcgym5nn43grvi5dksbd4j5rg2sv57.py
# Topologically Sorted Source Nodes: [softplus, alpha], Original ATen: [aten.softplus, aten.add]
# Source node to ATen node mapping:
# alpha => add
# softplus => exp, gt, log1p, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, 1), kwargs = {})
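# Note on the %where branch: this is PyTorch's numerically stable softplus.
# For x > 20 (the default softplus threshold) the kernel returns x unchanged,
# since log1p(exp(x)) is indistinguishable from x in fp32 beyond that point;
# otherwise it computes log1p(exp(x)). The trailing +1.0 folds in the
# module's `1 + F.softplus(...)`.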
triton_poi_fused_add_softplus_0 = async_compile.triton('triton_poi_fused_add_softplus_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_softplus_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softplus, alpha], Original ATen: [aten.softplus, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_softplus_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 0, 4, 4), (0, 16, 4, 1), torch.float32)
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class BoundedBeta(torch.distributions.Beta):
def log_prob(self, x):
return super().log_prob((x + 1) / 2)
class Beta(nn.Module):
def __init__(self, action_dim):
super(Beta, self).__init__()
self.action_dim = action_dim
def forward(self, alpha_beta):
alpha = 1 + F.softplus(alpha_beta[:, :self.action_dim])
beta = 1 + F.softplus(alpha_beta[:, self.action_dim:])
return alpha, beta
def sample(self, x, deterministic):
if deterministic is False:
action = self.evaluate(x).sample()
else:
return self.evaluate(x).mean
return 2 * action - 1
def evaluate(self, x):
alpha, beta = self(x)
return BoundedBeta(alpha, beta)
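# Observations on the parameterization above (notes only, no behavior change):
# softplus(.) >= 0, so alpha, beta >= 1 and the Beta density has no poles at
# 0 or 1. `sample` maps the Beta support [0, 1] onto the action range [-1, 1]
# via 2*u - 1, and BoundedBeta.log_prob inverts that map; the constant
# log-Jacobian of the affine change of variables (log 1/2) is dropped, which
# leaves gradients with respect to the network outputs unchanged. Note also
# that with the 4-channel test input and action_dim=4, the beta slice
# alpha_beta[:, 4:] is empty, which is why the compiled wrapper above returns
# a (4, 0, 4, 4) buffer.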
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'action_dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_softplus_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 0, 4, 4), (0, 16, 4, 1), torch.float32)
return buf0, buf1
class BoundedBeta(torch.distributions.Beta):
def log_prob(self, x):
return super().log_prob((x + 1) / 2)
class BetaNew(nn.Module):
def __init__(self, action_dim):
super(BetaNew, self).__init__()
self.action_dim = action_dim
def sample(self, x, deterministic):
if deterministic is False:
action = self.evaluate(x).sample()
else:
return self.evaluate(x).mean
return 2 * action - 1
def evaluate(self, x):
alpha, beta = self(x)
return BoundedBeta(alpha, beta)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
| RohanPankaj/apex | Beta | false | 986 | [
"MIT"
] | 0 | 74e96386bf9446d1179106d6d65ea0368c1b5b27 | https://github.com/RohanPankaj/apex/tree/74e96386bf9446d1179106d6d65ea0368c1b5b27 | import torch
import torch.nn as nn
import torch.nn.functional as F
class BoundedBeta(torch.distributions.Beta):
def log_prob(self, x):
return super().log_prob((x + 1) / 2)
class Model(nn.Module):
def __init__(self, action_dim):
super().__init__()
self.action_dim = action_dim
def forward(self, alpha_beta):
alpha = 1 + F.softplus(alpha_beta[:, :self.action_dim])
beta = 1 + F.softplus(alpha_beta[:, self.action_dim:])
return alpha, beta
def sample(self, x, deterministic):
if deterministic is False:
action = self.evaluate(x).sample()
else:
return self.evaluate(x).mean
return 2 * action - 1
def evaluate(self, x):
alpha, beta = self(x)
return BoundedBeta(alpha, beta)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
L2Norm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ql/cqlak5tz3s7deubsy52az4l7hpzcb4ekrbzbw4nqi6gbd7v3ukso.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, truediv, x], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# norm => add
# pow_1 => pow_1
# sqrt => sqrt
# sum_1 => sum_1
# truediv => div
# x => mul
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-10), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view), kwargs = {})
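# Note on the kernel below: with n_channels fixed at 4 the channel reduction
# is fully unrolled. Each thread loads the four channel values that share its
# (batch, H, W) site (offsets 0, 16, 32 and 48 within the 64-element batch
# stride), accumulates their squares, and rescales its own element, so no
# cross-thread reduction is needed.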
triton_poi_fused_add_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-10
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, truediv, x], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class L2Norm(nn.Module):
def __init__(self, n_channels, scale=1.0):
super(L2Norm, self).__init__()
self.n_channels = n_channels
self.scale = scale
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.weight.data *= 0.0
self.weight.data += self.scale
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
x = x / norm * self.weight.view(1, -1, 1, 1)
return x
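# Quick numerical check (an added sketch, not part of the original module):
# x / norm is unit-norm across channels at every spatial location, so after
# multiplying by the weight (every entry initialized to `scale`) the
# per-location channel norm equals `scale` up to eps. Note the init above
# relies on `0.0 * uninitialized == 0.0`, which breaks if torch.Tensor(n)
# happens to hold NaN or inf; nn.init.constant_(self.weight, scale) would be
# the robust spelling.
def _check_l2norm_scale():
    layer = L2Norm(n_channels=4, scale=20.0)
    y = layer(torch.rand(2, 4, 8, 8))
    expected = torch.full((2, 8, 8), 20.0)
    assert torch.allclose(y.pow(2).sum(1).sqrt(), expected, atol=1e-3)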
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-10
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_1,
primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class L2NormNew(nn.Module):
def __init__(self, n_channels, scale=1.0):
super(L2NormNew, self).__init__()
self.n_channels = n_channels
self.scale = scale
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.weight.data *= 0.0
self.weight.data += self.scale
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| Rocketbase-AI/rockets-s3fd | L2Norm | false | 987 | [
"MIT"
] | 0 | 40d978270a6b3ba2d397217ede0c735712814250 | https://github.com/Rocketbase-AI/rockets-s3fd/tree/40d978270a6b3ba2d397217ede0c735712814250 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, n_channels, scale=1.0):
super().__init__()
self.n_channels = n_channels
self.scale = scale
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.weight.data *= 0.0
self.weight.data += self.scale
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
x = x / norm * self.weight.view(1, -1, 1, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
RMSLELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/qk/cqk4s5hcy2ardmucnehpv4eizaoaykawcn6rvxhvx3dncogu2rkf.py
# Topologically Sorted Source Nodes: [add, log, add_1, log_1, mse_loss, sqrt], Original ATen: [aten.add, aten.log, aten.mse_loss, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# log => log
# log_1 => log_1
# mse_loss => mean, pow_1, sub
# sqrt => sqrt
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 1), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%log, %log_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%mean,), kwargs = {})
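# Note on the kernel below: the whole loss collapses into one
# persistent-reduction program. A single block loads all 256 elements, forms
# the squared differences of log(1 + x), reduces them with tl.sum, divides by
# 256.0 for the mean, and takes the square root before writing the scalar
# result back in place.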
triton_per_fused_add_log_mse_loss_sqrt_0 = async_compile.triton('triton_per_fused_add_log_mse_loss_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mse_loss_sqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_log_mse_loss_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp4 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp5 = tmp4 + tmp1
tmp6 = tl_math.log(tmp5)
tmp7 = tmp3 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 256.0
tmp13 = tmp11 / tmp12
tmp14 = libdevice.sqrt(tmp13)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp14, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add, log, add_1, log_1, mse_loss, sqrt], Original ATen: [aten.add, aten.log, aten.mse_loss, aten.sqrt]
stream0 = get_raw_stream(0)
triton_per_fused_add_log_mse_loss_sqrt_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class RMSLELoss(nn.Module):
def __init__(self):
super().__init__()
self.mse = nn.MSELoss()
def forward(self, pred, actual):
return torch.sqrt(self.mse(torch.log(pred + 1), torch.log(actual + 1)))
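# In closed form: RMSLE = sqrt(mean((log(1 + pred) - log(1 + actual)) ** 2)).
# Caveat (an observation, not a code change): both tensors must stay strictly
# greater than -1 for the logarithms to be finite, and torch.log1p(pred) is a
# numerically safer spelling of torch.log(pred + 1) for values near zero.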
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mse_loss_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
rindex = tl.arange(0, RBLOCK)[:]
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = tl_math.log(tmp2)
tmp5 = tmp4 + tmp1
tmp6 = tl_math.log(tmp5)
tmp7 = tmp3 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.broadcast_to(tmp8, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 256.0
tmp13 = tmp11 / tmp12
tmp14 = libdevice.sqrt(tmp13)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log_mse_loss_sqrt_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class RMSLELossNew(nn.Module):
def __init__(self):
super().__init__()
self.mse = nn.MSELoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| RosarioAndolina/psychXRF | RMSLELoss | false | 988 | [
"MIT"
] | 0 | e2adadbd17664d7f74c10304f84b3751c571226e | https://github.com/RosarioAndolina/psychXRF/tree/e2adadbd17664d7f74c10304f84b3751c571226e | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.mse = nn.MSELoss()
def forward(self, pred, actual):
return torch.sqrt(self.mse(torch.log(pred + 1), torch.log(actual + 1)))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SVIGlobalMeanPool2D | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/7h/c7hbbzvoewj2tsadwgr3uwpf6oz4y2eo5c2i5sb55oyvgaiiusmo.py
# Topologically Sorted Source Nodes: [mean, x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# x => mean_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [4]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [3]), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + (x0), tmp36, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SVIGlobalMeanPool2D(nn.Module):
"""
    Global mean pool over the two spatial dimensions.

:param x: [examples, samples, channels, H, W]
:return: [examples, samples, channels]
"""
def __init__(self):
super(SVIGlobalMeanPool2D, self).__init__()
def forward(self, x):
x = x.mean(4).mean(3)
return x
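# Shape sketch (illustrative, mirroring the docstring): mean(4) removes W,
# then mean(3) removes H from the already-reduced tensor, so the pair is
# equivalent to x.mean(dim=(3, 4)). For example:
#   SVIGlobalMeanPool2D()(torch.rand(2, 8, 16, 5, 7)).shape
#   # -> torch.Size([2, 8, 16])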
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 + tmp16
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp24 = tmp22 + tmp23
tmp25 = tmp24 / tmp7
tmp26 = tmp17 + tmp25
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tmp33 = tmp31 + tmp32
tmp34 = tmp33 / tmp7
tmp35 = tmp26 + tmp34
tmp36 = tmp35 / tmp7
tl.store(out_ptr0 + x0, tmp36, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class SVIGlobalMeanPool2DNew(nn.Module):
"""
    Global mean pool over the two spatial dimensions.

:param x: [examples, samples, channels, H, W]
:return: [examples, samples, channels]
"""
def __init__(self):
super(SVIGlobalMeanPool2DNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| RomanShen/radial-bnn | SVIGlobalMeanPool2D | false | 989 | [
"Apache-2.0"
] | 0 | 7c8bc85397c1461a6fd5ea9adf0631f9ade27f6c | https://github.com/RomanShen/radial-bnn/tree/7c8bc85397c1461a6fd5ea9adf0631f9ade27f6c | import torch
import torch.nn as nn
class Model(nn.Module):
"""
    Global mean pool over the two spatial dimensions.

:param x: [examples, samples, channels, H, W]
:return: [examples, samples, channels]
"""
def __init__(self):
super().__init__()
def forward(self, x):
x = x.mean(4).mean(3)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return []
|
MAPE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/cb/ccbe5dxv34jxbymjakhumsda4ukmqsisffc2gw24t4kvazdrzarn.py
# Topologically Sorted Source Nodes: [l1_loss, mul, max_1, mape, mean], Original ATen: [aten.sub, aten.abs, aten.mul, aten.maximum, aten.div, aten.mean]
# Source node to ATen node mapping:
# l1_loss => abs_1, sub
# mape => div
# max_1 => maximum
# mean => mean
# mul => mul
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 100), kwargs = {})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%arg1_1, %arg0_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %maximum), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
triton_per_fused_abs_div_maximum_mean_mul_sub_0 = async_compile.triton('triton_per_fused_abs_div_maximum_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_div_maximum_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_div_maximum_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 100.0
tmp5 = tmp3 * tmp4
tmp6 = triton_helpers.maximum(tmp0, tmp1)
tmp7 = tmp5 / tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [l1_loss, mul, max_1, mape, mean], Original ATen: [aten.sub, aten.abs, aten.mul, aten.maximum, aten.div, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_div_maximum_mean_mul_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MAPE(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.L1Loss(reduction='none')
def forward(self, pred, actual):
mape = 100 * self.l1(pred, actual) / torch.max(pred, actual)
return mape.mean()
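# A note on this variant (an observation on the code above): dividing by
# max(pred, actual) instead of the textbook |actual| makes the loss symmetric
# in its arguments and, for non-negative inputs, bounds every element-wise
# term by 100; positions where pred and actual are both exactly zero produce
# 0/0 = NaN, which would propagate through the mean.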
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_div_maximum_mean_mul_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
rindex = tl.arange(0, RBLOCK)[:]
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 100.0
tmp5 = tmp3 * tmp4
tmp6 = triton_helpers.maximum(tmp0, tmp1)
tmp7 = tmp5 / tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_div_maximum_mean_mul_sub_0[grid(1)](buf1,
arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MAPENew(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.L1Loss(reduction='none')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| RosarioAndolina/psychXRF | MAPE | false | 990 | [
"MIT"
] | 0 | e2adadbd17664d7f74c10304f84b3751c571226e | https://github.com/RosarioAndolina/psychXRF/tree/e2adadbd17664d7f74c10304f84b3751c571226e | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.l1 = nn.L1Loss(reduction='none')
def forward(self, pred, actual):
mape = 100 * self.l1(pred, actual) / torch.max(pred, actual)
return mape.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/5w/c5wnubyijcgstpnbhnht5ommr737mwfx67lgpfc6mvwlwmhzfkmq.py
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# q_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ko/ckow7ci7f3mygm6ujdzdisip6tet25h4hj6uestesqalhkarwrrw.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/qa/cqazar4hg4rdjbxm7zr5mix2w3dkhfmvvjksn7c6lktr5yfe6ndy.py
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_3 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/c2/cc2wsialcqiknwetscnqy3fzaqmmib3cxfb7tsfjx7hdlsxbdq7s.py
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# attn_weights_4 => div_1
# sum_1 => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_12, [1]), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {})
triton_poi_fused_div_sum_3 = async_compile.triton('triton_poi_fused_div_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (256*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + (256*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + (256*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf4, buf7, 64, 16, grid=grid(64), stream=stream0)
del buf4
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1, 16, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf8, buf9, 4, 16, grid=grid(4, 16), stream=stream0)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_7
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_3.run(buf7, buf11, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.attn_dropout = attn_dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.register_parameter('in_proj_bias', None)
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key, value, attn_mask=None):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Timesteps can be masked by supplying a T x T mask in the
`attn_mask` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if qkv_same:
q, k, v = self.in_proj_qkv(query)
elif kv_same:
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q = q * self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
src_len = k.size(1)
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
if attn_mask is not None:
try:
attn_weights += attn_mask.unsqueeze(0)
            except RuntimeError:
                # Fail loudly with the mismatched shapes instead of silently.
                raise AssertionError(
                    f'attn_mask shape {attn_mask.unsqueeze(0).shape} is '
                    f'incompatible with attn_weights shape {attn_weights.shape}')
attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(
attn_weights)
attn_weights = F.dropout(attn_weights, p=self.attn_dropout,
training=self.training)
attn = torch.bmm(attn_weights, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len,
            self.head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.sum(dim=1) / self.num_heads
return attn, attn_weights
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query, **kwargs):
return self._in_proj(query, end=self.embed_dim, **kwargs)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None, **kwargs):
weight = kwargs.get('weight', self.in_proj_weight)
bias = kwargs.get('bias', self.in_proj_bias)
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
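# Hedged example (illustrative addition, not part of the original source; the
# helper name `_demo_in_proj_slicing` is hypothetical): the packed
# (3 * embed_dim, embed_dim) weight is sliced row-wise, so chunking the full
# projection reproduces the individual q/k/v projections.
def _demo_in_proj_slicing():
    mha = MultiheadAttention(embed_dim=4, num_heads=2)
    x = torch.rand(3, 2, 4)  # time x batch x channel
    q, k, v = mha.in_proj_qkv(x)
    assert torch.allclose(q, mha.in_proj_q(x))
    assert torch.allclose(k, mha.in_proj_k(x))
    assert torch.allclose(v, mha.in_proj_v(x))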
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
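    # Fused bias-add and query scaling; the scale is 1.0 here because
    # head_dim = embed_dim // num_heads = 1 and 1 ** -0.5 == 1.0.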
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel,
    XBLOCK: tl.constexpr):
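    # Numerically stable softmax over the last dimension (length 16): subtract
    # the row max before exponentiating, then normalize by the row sum.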
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
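    # Contiguous transpose copy: reads element (y0 + 4 * x1) and writes it to
    # (x1 + 16 * y0), materializing attn.transpose(0, 1).contiguous().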
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask,
        eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
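    # Head-averaged attention weights: sums the num_heads = 4 slices (stride
    # 64 apart) and multiplies by 1 / num_heads = 0.25.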
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1,
beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1,
beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0),
0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
triton_per_fused__softmax_1[grid(64)](buf4, buf7, 64, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf4
buf8 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf7, reinterpret_tensor(buf2, (16, 16, 1), (1,
16, 1), 0), out=buf8)
buf9 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(4, 16)](buf8, buf9, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (16, 4), (4, 1), 0)
del buf8
extern_kernels.addmm(primals_7, reinterpret_tensor(buf9, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf10)
del primals_7
buf11 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_div_sum_3[grid(256)](buf7, buf11, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0
), buf11, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf7, reinterpret_tensor(buf9, (16, 4), (4, 1), 0
), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0
), reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0
), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0)
class MultiheadAttentionNew(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.attn_dropout = attn_dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.register_parameter('in_proj_bias', None)
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query, **kwargs):
return self._in_proj(query, end=self.embed_dim, **kwargs)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None, **kwargs):
weight = kwargs.get('weight', self.in_proj_weight)
bias = kwargs.get('bias', self.in_proj_bias)
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def forward(self, input_0, input_1, input_2):
primals_4 = self.in_proj_weight
primals_5 = self.in_proj_bias
primals_6 = self.out_proj.weight
primals_7 = self.out_proj.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
| Redaimao/RERD | MultiheadAttention | false | 991 | [
"MIT"
] | 0 | 40413d4b6743f3e5db0c30ee969d45661d001834 | https://github.com/Redaimao/RERD/tree/40413d4b6743f3e5db0c30ee969d45661d001834 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
class Model(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.attn_dropout = attn_dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
self.register_parameter('in_proj_bias', None)
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key, value, attn_mask=None):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Timesteps can be masked by supplying a T x T mask in the
`attn_mask` argument. Padding elements can be excluded from
the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
batch x src_len, where padding elements are indicated by 1s.
"""
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if qkv_same:
q, k, v = self.in_proj_qkv(query)
elif kv_same:
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q = q * self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
src_len = k.size(1)
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
dim=1)
# ... truncated (>4000 chars) for memory efficiency |
SVIGlobalMaxPool2D | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/2j/c2jekx4hpgefr4mgbvd6osfagralcyjkcmltymaagoikymiaqihb.py
# Topologically Sorted Source Nodes: [max_2], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_2 => getitem_2
# Graph fragment:
# %getitem_2 : [num_users=1] = call_function[target=operator.getitem](args = (%max_2, 0), kwargs = {})
triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = triton_helpers.maximum(tmp6, tmp13)
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = triton_helpers.maximum(tmp14, tmp21)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp29 = triton_helpers.maximum(tmp27, tmp28)
tmp30 = triton_helpers.maximum(tmp22, tmp29)
tl.store(out_ptr0 + (x0), tmp30, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_2], Original ATen: [aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SVIGlobalMaxPool2D(nn.Module):
"""
Expects
:param x: [examples, samples, channels, H, W]
:return: [examples, samples, channels]
"""
def __init__(self):
super(SVIGlobalMaxPool2D, self).__init__()
def forward(self, x):
x = x.max(4)[0].max(3)[0]
return x
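# Hedged usage sketch (illustrative addition, not part of the original source;
# the helper name `_demo_global_max_pool` is hypothetical): pooling reduces the
# two trailing spatial dimensions, equivalent to amax over dims (3, 4).
def _demo_global_max_pool():
    pool = SVIGlobalMaxPool2D()
    x = torch.rand(2, 3, 8, 5, 5)  # [examples, samples, channels, H, W]
    out = pool(x)
    assert out.shape == (2, 3, 8)
    assert torch.allclose(out, x.amax(dim=(3, 4)))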
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
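    # Global spatial max pool: 16 scalar loads cover one 4x4 (H, W) window and
    # are reduced pairwise with maximum, matching x.max(4)[0].max(3)[0].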
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp14 = triton_helpers.maximum(tmp6, tmp13)
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = triton_helpers.maximum(tmp14, tmp21)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp27 = triton_helpers.maximum(tmp25, tmp26)
tmp29 = triton_helpers.maximum(tmp27, tmp28)
tmp30 = triton_helpers.maximum(tmp22, tmp29)
tl.store(out_ptr0 + x0, tmp30, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class SVIGlobalMaxPool2DNew(nn.Module):
"""
Expects
:param x: [examples, samples, channels, H, W]
:return: [examples, samples, channels]
"""
def __init__(self):
super(SVIGlobalMaxPool2DNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| RomanShen/radial-bnn | SVIGlobalMaxPool2D | false | 992 | [
"Apache-2.0"
] | 0 | 7c8bc85397c1461a6fd5ea9adf0631f9ade27f6c | https://github.com/RomanShen/radial-bnn/tree/7c8bc85397c1461a6fd5ea9adf0631f9ade27f6c | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Expects
:param x: [examples, samples, channels, H, W]
:return: [examples, samples, channels]
"""
def __init__(self):
super().__init__()
def forward(self, x):
x = x.max(4)[0].max(3)[0]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return []
|
Critic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/do/cdo22no4lmipk7byduyah2xsadvdcbfr22puoptl5br3l66r6jra.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 256, grid=grid(256), stream=stream0)
del buf3
del primals_5
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as f
from torch import nn
class Critic(nn.Module):
def __init__(self, input_dim):
super(Critic, self).__init__()
self._input_dim = input_dim
self.dense1 = nn.Linear(self._input_dim, self._input_dim)
self.dense2 = nn.Linear(self._input_dim, self._input_dim)
def forward(self, x):
x = f.leaky_relu(self.dense1(x))
x = f.leaky_relu(self.dense2(x))
return x
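# Hedged shape check (illustrative addition, not part of the original source;
# the helper name `_demo_critic` is hypothetical): both dense layers map
# input_dim -> input_dim, so the output keeps the input's shape.
def _demo_critic():
    critic = Critic(input_dim=4)
    x = torch.rand(4, 4, 4, 4)
    assert critic(x).shape == x.shape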
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
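    # Fused bias-add + LeakyReLU(negative_slope=0.01): stores the boolean
    # positive mask (reused in the backward pass) alongside the activations.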
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_0[grid(256)](buf3, primals_5, buf4,
buf5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_5
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf4, primals_4
class CriticNew(nn.Module):
def __init__(self, input_dim):
super(CriticNew, self).__init__()
self._input_dim = input_dim
self.dense1 = nn.Linear(self._input_dim, self._input_dim)
self.dense2 = nn.Linear(self._input_dim, self._input_dim)
def forward(self, input_0):
primals_1 = self.dense1.weight
primals_2 = self.dense1.bias
primals_4 = self.dense2.weight
primals_5 = self.dense2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| RosalRicardo/RTRGAN | Critic | false | 993 | [
"MIT"
] | 0 | 6f4551ab8445367f8b9c711f41f15dd465abaef1 | https://github.com/RosalRicardo/RTRGAN/tree/6f4551ab8445367f8b9c711f41f15dd465abaef1 | import torch
import torch.nn.functional as f
from torch import nn
class Model(nn.Module):
def __init__(self, input_dim):
super().__init__()
self._input_dim = input_dim
self.dense1 = nn.Linear(self._input_dim, self._input_dim)
self.dense2 = nn.Linear(self._input_dim, self._input_dim)
def forward(self, x):
x = f.leaky_relu(self.dense1(x))
x = f.leaky_relu(self.dense2(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
R2Score | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/np/cnp5g4we4zgdssn4oqr2jughmvlqv5fahhbwtqzc562xeba2dzqq.py
# Topologically Sorted Source Nodes: [sub, pow_1, rss, ym, sub_1, pow_2, tss, truediv, sub_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mean, aten.div, aten.rsub]
# Source node to ATen node mapping:
# pow_1 => pow_1
# pow_2 => pow_2
# rss => sum_1
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# truediv => div
# tss => sum_2
# ym => mean
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%arg0_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_2,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
triton_per_fused_div_mean_pow_rsub_sub_sum_0 = async_compile.triton('triton_per_fused_div_mean_pow_rsub_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mean_pow_rsub_sub_sum_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mean_pow_rsub_sub_sum_0(in_out_ptr1, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tl.broadcast_to(tmp0, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tmp12 = tmp0 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = tmp6 / tmp16
tmp18 = 1.0
tmp19 = tmp18 - tmp17
tl.debug_barrier()
tl.store(in_out_ptr1 + (tl.full([1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, rss, ym, sub_1, pow_2, tss, truediv, sub_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mean, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_div_mean_pow_rsub_sub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class R2Score(nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred, actual):
rss = ((actual - pred) ** 2).sum()
ym = actual.mean()
tss = ((actual - ym) ** 2).sum()
return 1 - rss / tss
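# Sanity check (a sketch): a perfect prediction gives rss == 0, so the score
# is 1.0, while predicting actual.mean() everywhere gives rss == tss, so the
# score is 0.0. Scores can be negative for predictors worse than the mean.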
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
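# Single persistent-reduction kernel: with x the first input and y the second,
# it computes 1 - sum((x - y)^2) / sum((x - mean(x))^2) over all 256 flattened
# elements and writes the scalar result in place into in_out_ptr1.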
@triton.jit
def triton_per_fused_div_mean_pow_rsub_sub_sum_0(in_out_ptr1, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tl.broadcast_to(tmp0, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tmp12 = tmp0 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = tmp6 / tmp16
tmp18 = 1.0
tmp19 = tmp18 - tmp17
tl.debug_barrier()
tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_div_mean_pow_rsub_sub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class R2ScoreNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| RosarioAndolina/psychXRF | R2Score | false | 994 | [
"MIT"
] | 0 | e2adadbd17664d7f74c10304f84b3751c571226e | https://github.com/RosarioAndolina/psychXRF/tree/e2adadbd17664d7f74c10304f84b3751c571226e | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred, actual):
rss = ((actual - pred) ** 2).sum()
ym = actual.mean()
tss = ((actual - ym) ** 2).sum()
return 1 - rss / tss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/rh/crhy6nilvaajphuuoyup37xl4ncuiyrcb3fnt5aboux6wyvcg7ie.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xl/cxldlhjpfliyaeswhsohcdhtqevqxjlvece7kkxd6sy4o7gkfgo3.py
# Topologically Sorted Source Nodes: [scores_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# scores_1 => amax, div_1, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_11, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_11, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/mz/cmzlu2lip25blpsdqeby7ek5757op6xw3pdkxbdediou5szw32tx.py
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# linear_3 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_15,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ok/cokamvfj3z4xuz3jmalftfns3huimimr3c4gzm52vaybmdliglu4.py
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# linear_3 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_11), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores_1], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf5, buf8, 256, 16, grid=grid(256), stream=stream0)
del buf5
buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_8, buf9, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_8
buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf10, buf11, 64, 4, grid=grid(64, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf13, primals_11, 256, grid=grid(256), stream=stream0)
del primals_11
return (buf13, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads, p_drop=0.1):
super(MultiHeadAttention, self).__init__()
assert d_model % n_heads == 0
self.d_model = d_model
self.n_heads = n_heads
self.d_hidden = d_model // n_heads
self.q_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(p_drop)
self.fc = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
q = self.q_linear(q).view(bs, -1, self.n_heads, self.d_hidden)
k = self.k_linear(k).view(bs, -1, self.n_heads, self.d_hidden)
v = self.v_linear(v).view(bs, -1, self.n_heads, self.d_hidden)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
scores = torch.matmul(q, k.transpose(-2, -1)) / np.sqrt(self.d_hidden)
if mask is not None:
mask = mask.unsqueeze(1)
            scores = scores.masked_fill(mask == 0, -1000000000.0)
scores = F.softmax(scores, dim=-1)
attn = torch.matmul(scores, v)
concat = attn.transpose(1, 2).reshape(bs, -1, self.d_model)
concat = self.dropout(concat)
return self.fc(concat)
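    # Per head: attn = softmax(Q K^T / sqrt(d_hidden)) V; the heads are then
    # concatenated back to d_model before the final projection. With the
    # shapes from get_inputs below, a (4, 4, 4, 4) input flattens to 16 query
    # positions per batch, so the output is (4, 16, 4).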
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'n_heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
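# First kernel: fuses the linear-layer bias add with the transpose/copy that
# materializes each attention head's Q/K/V in (batch, head, seq, d_hidden)
# layout for the batched matmuls below.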
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
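# Numerically stable row softmax over the last (size-16) dimension: subtract
# the row max before exp, then normalize by the row sum.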
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf3, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_5, buf4, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_per_fused__softmax_1[grid(256)](buf5, buf8, 256, 16, XBLOCK=
32, num_warps=4, num_stages=1)
del buf5
buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf9, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_8
buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16,
1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0),
out=buf10)
buf11 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(64, 4)](buf10, buf11, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0)
del buf12
triton_poi_fused_add_3[grid(256)](buf13, primals_11, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_11
return buf13, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0
), primals_10, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0)
class MultiHeadAttentionNew(nn.Module):
def __init__(self, d_model, n_heads, p_drop=0.1):
super(MultiHeadAttentionNew, self).__init__()
assert d_model % n_heads == 0
self.d_model = d_model
self.n_heads = n_heads
self.d_hidden = d_model // n_heads
self.q_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(p_drop)
self.fc = nn.Linear(d_model, d_model)
def forward(self, input_0, input_1, input_2):
primals_2 = self.q_linear.weight
primals_3 = self.q_linear.bias
primals_4 = self.k_linear.weight
primals_5 = self.k_linear.bias
primals_7 = self.v_linear.weight
primals_8 = self.v_linear.bias
primals_10 = self.fc.weight
primals_11 = self.fc.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| RegiusQuant/nlp-practice | MultiHeadAttention | false | 995 | [
"MIT"
] | 0 | ffa99aa585134941aa148da11775c2b16d42eef7 | https://github.com/RegiusQuant/nlp-practice/tree/ffa99aa585134941aa148da11775c2b16d42eef7 | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, d_model, n_heads, p_drop=0.1):
super().__init__()
assert d_model % n_heads == 0
self.d_model = d_model
self.n_heads = n_heads
self.d_hidden = d_model // n_heads
self.q_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(p_drop)
self.fc = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
q = self.q_linear(q).view(bs, -1, self.n_heads, self.d_hidden)
k = self.k_linear(k).view(bs, -1, self.n_heads, self.d_hidden)
v = self.v_linear(v).view(bs, -1, self.n_heads, self.d_hidden)
q = q.transpose(1, 2)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
scores = torch.matmul(q, k.transpose(-2, -1)) / np.sqrt(self.d_hidden)
if mask is not None:
mask = mask.unsqueeze(1)
            scores = scores.masked_fill(mask == 0, -1000000000.0)
scores = F.softmax(scores, dim=-1)
attn = torch.matmul(scores, v)
concat = attn.transpose(1, 2).reshape(bs, -1, self.d_model)
concat = self.dropout(concat)
return self.fc(concat)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ACNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/5y/c5yq7wkgmmcygrawripwacy566sggsmh2mzk5izw35wk7ferohhu.py
# Topologically Sorted Source Nodes: [pi], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# pi => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (100, 4), (4, 1))
assert_size_stride(primals_2, (100, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 100), (100, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (100, 4), (4, 1))
assert_size_stride(primals_7, (100, ), (1, ))
assert_size_stride(primals_8, (1, 100), (100, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool)
# Topologically Sorted Source Nodes: [pi], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 6400, grid=grid(6400), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [actions], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 100), (1, 4), 0), out=buf3)
del primals_6
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf3 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf4, primals_7, buf7, 6400, grid=grid(6400), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [values], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf4, (64, 100), (100, 1), 0), reinterpret_tensor(primals_8, (100, 1), (1, 100), 0), alpha=1, beta=1, out=buf6)
del primals_9
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(buf4, (64, 100), (100, 1), 0), primals_8, buf7, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

# GAMMA, STEP, N_S, and N_A are referenced below but were not defined in this
# snippet; the values here are assumed placeholders so the file is runnable
# (N_S and N_A match the s_dim/a_dim of 4 used in get_init_inputs).
GAMMA = 0.9
STEP = 5
N_S, N_A = 4, 4
def from_numpy(np_array, dtype=np.float32):
if np_array.dtype != dtype:
np_array = np_array.astype(dtype)
return Variable(torch.from_numpy(np_array))
class ACNet(nn.Module):
"""
    V: s -> value (scalar)
    Pi: s -> distribution over actions
"""
def __init__(self, s_dim, a_dim):
super(ACNet, self).__init__()
self.s_dim = s_dim
self.a_dim = a_dim
self.v1 = nn.Linear(s_dim, 100)
self.v2 = nn.Linear(100, 1)
self.pi1 = nn.Linear(s_dim, 100)
self.pi2 = nn.Linear(100, a_dim)
self.dist_cate = torch.distributions.Categorical
def forward(self, state):
pi = F.relu(self.pi1(state))
actions = self.pi2(pi)
v = F.relu(self.v1(state))
values = self.v2(v)
return actions, values
def loss_func(self, s, a, v_targets):
self.train()
logits, values = self.forward(s)
td = v_targets - values
c_loss = td.pow(2)
action_pb = F.softmax(logits, dim=1)
pi_dist = self.dist_cate(action_pb)
a_loss = -pi_dist.log_prob(a) * td.detach()
total_loss = (c_loss + a_loss).mean()
return total_loss
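    # Advantage actor-critic loss: the critic minimizes td^2 where
    # td = v_target - V(s), and the actor maximizes log pi(a|s) * td
    # (td is detached so the policy term does not backprop into the critic).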
def choose_action(self, state):
"""
state : single state
"""
self.eval()
states = torch.unsqueeze(from_numpy(state), dim=0)
actions, _ = self.forward(states)
pb = F.softmax(actions, dim=1).data
return self.dist_cate(pb).sample().numpy()[0]
def update(self, opt, s_t, states, actions, rs, done):
"""
        n-step learning.
        s_t: last state
        states: states from t to t+step
        actions: actions from t to t+step
        rs: rewards
        done: whether the last state is terminal
"""
if done:
R = 0
else:
s_t = torch.unsqueeze(from_numpy(s_t[None, :]), 0)
R = self.forward(s_t)[-1].data.numpy()[0]
v_target = []
for r in rs[::-1]:
R = r + GAMMA * R
v_target.append(R)
v_target.reverse()
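        # Worked example: rs = [1, 1, 1] with GAMMA = 0.9 and terminal R = 0
        # yields v_target = [2.71, 1.9, 1.0], via R_t = r_t + GAMMA * R_{t+1}.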
v_target = from_numpy(np.stack(v_target, 0))
loss = self.loss_func(states, actions, v_target)
opt.zero_grad()
loss.backward()
opt.step()
@classmethod
def training(cls, env):
ac = ACNet(N_S, N_A)
opt = torch.optim.Adam(ac.parameters())
for epi in range(2005):
s = env.reset()
ab, sb, rb = [], [], []
total_r = 0
for i in range(200):
if epi > 2000:
env.render()
a = ac.choose_action(s)
s_, r, done, _ = env.step(a)
if done:
r = -1
ab.append(a)
sb.append(s)
rb.append(r)
total_r += r
if i % STEP == 0 and i > 0:
ab = from_numpy(np.stack(ab), dtype=np.int64)
sb = from_numpy(np.stack(sb))
ac.update(opt, s_, sb, ab, rb, done)
ab, sb, rb = [], [], []
s = s_
if done:
break
if epi % 20 == 0:
                pass  # no-op in the original source (likely a stripped progress print)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'s_dim': 4, 'a_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

# GAMMA, STEP, N_S, and N_A are referenced below but were not defined in this
# snippet; the values here are assumed placeholders so the file is runnable.
GAMMA = 0.9
STEP = 5
N_S, N_A = 4, 4
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
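# Fused bias-add + ReLU; it also writes the (output <= 0) boolean mask that
# the autograd backward pass uses for the ReLU threshold gradient.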
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (100, 4), (4, 1))
assert_size_stride(primals_2, (100,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 100), (100, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (100, 4), (4, 1))
assert_size_stride(primals_7, (100,), (1,))
assert_size_stride(primals_8, (1, 100), (100, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(6400)](buf1,
primals_2, buf8, 6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 100),
(100, 1), 0), reinterpret_tensor(primals_4, (100, 4), (1, 100),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 100), (1, 4), 0), out=buf3)
del primals_6
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf3
buf7 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(6400)](buf4,
primals_7, buf7, 6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf4, (64, 100),
(100, 1), 0), reinterpret_tensor(primals_8, (100, 1), (1, 100),
0), alpha=1, beta=1, out=buf6)
del primals_9
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 100), (100, 1), 0
), reinterpret_tensor(buf4, (64, 100), (100, 1), 0
), primals_8, buf7, primals_4, buf8
def from_numpy(np_array, dtype=np.float32):
if np_array.dtype != dtype:
np_array = np_array.astype(dtype)
return Variable(torch.from_numpy(np_array))
class ACNetNew(nn.Module):
"""
    V: s -> value (scalar)
    Pi: s -> distribution over actions
"""
def __init__(self, s_dim, a_dim):
super(ACNetNew, self).__init__()
self.s_dim = s_dim
self.a_dim = a_dim
self.v1 = nn.Linear(s_dim, 100)
self.v2 = nn.Linear(100, 1)
self.pi1 = nn.Linear(s_dim, 100)
self.pi2 = nn.Linear(100, a_dim)
self.dist_cate = torch.distributions.Categorical
def loss_func(self, s, a, v_targets):
self.train()
logits, values = self.forward(s)
td = v_targets - values
c_loss = td.pow(2)
action_pb = F.softmax(logits, dim=1)
pi_dist = self.dist_cate(action_pb)
a_loss = -pi_dist.log_prob(a) * td.detach()
total_loss = (c_loss + a_loss).mean()
return total_loss
def choose_action(self, state):
"""
state : single state
"""
self.eval()
states = torch.unsqueeze(from_numpy(state), dim=0)
actions, _ = self.forward(states)
pb = F.softmax(actions, dim=1).data
return self.dist_cate(pb).sample().numpy()[0]
def update(self, opt, s_t, states, actions, rs, done):
"""
        n-step learning.
        s_t: last state
        states: states from t to t+step
        actions: actions from t to t+step
        rs: rewards
        done: whether the last state is terminal
"""
if done:
R = 0
else:
s_t = torch.unsqueeze(from_numpy(s_t[None, :]), 0)
R = self.forward(s_t)[-1].data.numpy()[0]
v_target = []
for r in rs[::-1]:
R = r + GAMMA * R
v_target.append(R)
v_target.reverse()
v_target = from_numpy(np.stack(v_target, 0))
loss = self.loss_func(states, actions, v_target)
opt.zero_grad()
loss.backward()
opt.step()
@classmethod
def training(cls, env):
ac = ACNetNew(N_S, N_A)
opt = torch.optim.Adam(ac.parameters())
for epi in range(2005):
s = env.reset()
ab, sb, rb = [], [], []
total_r = 0
for i in range(200):
if epi > 2000:
env.render()
a = ac.choose_action(s)
s_, r, done, _ = env.step(a)
if done:
r = -1
ab.append(a)
sb.append(s)
rb.append(r)
total_r += r
if i % STEP == 0 and i > 0:
ab = from_numpy(np.stack(ab), dtype=np.int64)
sb = from_numpy(np.stack(sb))
ac.update(opt, s_, sb, ab, rb, done)
ab, sb, rb = [], [], []
s = s_
if done:
break
if epi % 20 == 0:
                pass  # no-op in the original source (likely a stripped progress print)
def forward(self, input_0):
primals_1 = self.v1.weight
primals_2 = self.v1.bias
primals_8 = self.v2.weight
primals_9 = self.v2.bias
primals_6 = self.pi1.weight
primals_7 = self.pi1.bias
primals_4 = self.pi2.weight
primals_5 = self.pi2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
| RocksonZeta/ac | ACNet | false | 996 | [
"MIT"
] | 0 | 050a5cd176864cc2e1f7c376045c3342a7f93221 | https://github.com/RocksonZeta/ac/tree/050a5cd176864cc2e1f7c376045c3342a7f93221 | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

# GAMMA, STEP, N_S, and N_A are referenced below but were not defined in this
# snippet; the values here are assumed placeholders so the file is runnable.
GAMMA = 0.9
STEP = 5
N_S, N_A = 4, 4
def from_numpy(np_array, dtype=np.float32):
if np_array.dtype != dtype:
np_array = np_array.astype(dtype)
return Variable(torch.from_numpy(np_array))
class Model(nn.Module):
"""
    V: s -> value (scalar)
    Pi: s -> distribution over actions
"""
def __init__(self, s_dim, a_dim):
super().__init__()
self.s_dim = s_dim
self.a_dim = a_dim
self.v1 = nn.Linear(s_dim, 100)
self.v2 = nn.Linear(100, 1)
self.pi1 = nn.Linear(s_dim, 100)
self.pi2 = nn.Linear(100, a_dim)
self.dist_cate = torch.distributions.Categorical
def forward(self, state):
pi = F.relu(self.pi1(state))
actions = self.pi2(pi)
v = F.relu(self.v1(state))
values = self.v2(v)
return actions, values
def loss_func(self, s, a, v_targets):
self.train()
logits, values = self.forward(s)
td = v_targets - values
c_loss = td.pow(2)
action_pb = F.softmax(logits, dim=1)
pi_dist = self.dist_cate(action_pb)
a_loss = -pi_dist.log_prob(a) * td.detach()
total_loss = (c_loss + a_loss).mean()
return total_loss
def choose_action(self, state):
"""
state : single state
"""
self.eval()
states = torch.unsqueeze(from_numpy(state), dim=0)
actions, _ = self.forward(states)
pb = F.softmax(actions, dim=1).data
return self.dist_cate(pb).sample().numpy()[0]
def update(self, opt, s_t, states, actions, rs, done):
"""
        n-step learning.
        s_t: last state
        states: states from t to t+step
        actions: actions from t to t+step
        rs: rewards
        done: whether the last state is terminal
"""
if done:
R = 0
else:
s_t = torch.unsqueeze(from_numpy(s_t[None, :]), 0)
R = self.forward(s_t)[-1].data.numpy()[0]
v_target = []
for r in rs[::-1]:
R = r + GAMMA * R
v_target.append(R)
v_target.reverse()
v_target = from_numpy(np.stack(v_target, 0))
loss = self.loss_func(states, actions, v_target)
opt.zero_grad()
loss.backward()
opt.step()
@classmethod
def training(cls, env):
        ac = cls(N_S, N_A)
opt = torch.optim.Adam(ac.parameters())
for epi in range(2005):
s = env.reset()
ab, sb, rb = [], [], []
total_r = 0
for i in range(200):
if epi > 2000:
env.render()
a = ac.choose_action(s)
s_, r, done, _ = env.step(a)
if done:
r = -1
ab.append(a)
sb.append(s)
rb.append(r)
total_r += r
if i % STEP == 0 and i > 0:
ab = from_numpy(np.stack(ab), dtype=np.int64)
sb = from_numpy(np.stack(sb))
ac.update(opt, s_, sb, ab, rb, done)
ab, sb, rb = [], [], []
s = s_
if done:
break
if epi % 20 == 0:
                pass  # no-op in the original source (likely a stripped progress print)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
BReLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/4j/c4jkdxo2rtkxhqp5bq4jc27wn42tb3xs5u3tl5q64rc4ovyvgovr.py
# Topologically Sorted Source Nodes: [relu, add], Original ATen: [aten.relu, aten.add]
# Source node to ATen node mapping:
# add => add
# relu => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %arg1_1), kwargs = {})
triton_poi_fused_add_relu_0 = async_compile.triton('triton_poi_fused_add_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp5 = tmp4.to(tl.float32)
tmp6 = tmp2 + tmp5
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu, add], Original ATen: [aten.relu, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_relu_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((), (), device='cuda:0', dtype=torch.int64)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BReLU(nn.Module):
"""
Biased ReLU
BReLU(x) = ReLU(x) + b
Shape:
-Input: (N, *)
-Output: (N, *), same shape as the input
Parameters:
-in_features: number of input features
-b: fixed parameter (a bias-like offset added after the ReLU)
Examples:
>>> input = torch.randn(300, 6)
>>> afunc = BReLU(input.shape[1], b = 1.0e-8)
>>> x = afunc(input)
"""
def __init__(self, in_features, b):
super(BReLU, self).__init__()
self.in_features = in_features
self.b = nn.Parameter(torch.tensor(b), requires_grad=False)
self.relu = nn.ReLU()
def forward(self, x):
return self.relu(x) + self.b
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'b': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp5 = tmp4.to(tl.float32)
tmp6 = tmp2 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_relu_0[grid(256)](arg0_1, arg1_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
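# Hedged sanity sketch (editorial addition, not generated code): the fused
# kernel should agree with eager relu(x) + b. The helper name and the test
# values are assumptions for illustration only.
def _check_brelu_kernel():
    x = torch.randn(4, 4, 4, 4, device='cuda')
    b = torch.tensor(4, device='cuda')  # 0-dim int64 bias, matching call()'s guards
    out, = call([x, b])  # call() clears the list; x and b stay alive here
    assert torch.allclose(out, torch.relu(x) + b)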
class BReLUNew(nn.Module):
"""
Biased ReLU
BReLU(x) = ReLU(x) + b
Shape:
-Input: (N, *)
-Output: (N, *), same shape as the input
Parameters:
-in_features: number of input features
-b: fixed parameter (a bias-like offset added after the ReLU)
Examples:
>>> input = torch.randn(300, 6)
>>> afunc = BReLU(input.shape[1], b = 1.0e-8)
>>> x = afunc(input)
"""
def __init__(self, in_features, b):
super(BReLUNew, self).__init__()
self.in_features = in_features
self.b = nn.Parameter(torch.tensor(b), requires_grad=False)
self.relu = nn.ReLU()
def forward(self, input_0):
arg1_1 = self.b
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
| RosarioAndolina/psychXRF | BReLU | false | 997 | [
"MIT"
] | 0 | e2adadbd17664d7f74c10304f84b3751c571226e | https://github.com/RosarioAndolina/psychXRF/tree/e2adadbd17664d7f74c10304f84b3751c571226e | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Biased ReLU
BReLU(x) = ReLU(x) + b
Shape:
-Input: (N, *)
-Output: (N, *), same shape as the input
Parameters:
-in_features: number of input features
-b: fixed parameter (bias like for relu)
Examples:
>>> input = torch.randn(300, 6)
>>> afunc = BReLU(input.shape[1], b = 1.0e-8)
>>> x = afunc(input)
"""
def __init__(self, in_features, b):
super().__init__()
self.in_features = in_features
self.b = nn.Parameter(torch.tensor(b), requires_grad=False)
self.relu = nn.ReLU()
def forward(self, x):
return self.relu(x) + self.b
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Beta2 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/up/cup6o5cbzbunxrgsjove7ghgc4dkxlfpf5zncfpxjlrgo5zjrjpe.py
# Topologically Sorted Source Nodes: [mean, sub_2, exp, var, truediv_1, mul_1, sub_3, sub, truediv, pow_2, mul, alpha, beta], Original ATen: [aten.sigmoid, aten.rsub, aten.exp, aten.pow, aten.div, aten.mul, aten.sub]
# Source node to ATen node mapping:
# alpha => sub_1
# beta => sub_4
# exp => exp
# mean => sigmoid
# mul => mul
# mul_1 => mul_1
# pow_2 => pow_2
# sub => sub
# sub_2 => sub_2
# sub_3 => sub_3
# truediv => div
# truediv_1 => div_1
# var => pow_1
# Graph fragment:
# %sigmoid : [num_users=5] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg1_1,), kwargs = {})
# %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%exp, 2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %pow_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %sigmoid), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %pow_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sigmoid, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %pow_2), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sigmoid), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_3, %sub_1), kwargs = {})
triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 * tmp5
tmp7 = tmp3 / tmp6
tmp8 = tmp1 * tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp9 - tmp1
tmp11 = tmp7 * tmp1
tmp12 = tmp11 - tmp2
tmp13 = tmp12 - tmp10
tl.store(out_ptr0 + (x2), tmp10, xmask)
tl.store(out_ptr1 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, sub_2, exp, var, truediv_1, mul_1, sub_3, sub, truediv, pow_2, mul, alpha, beta], Original ATen: [aten.sigmoid, aten.rsub, aten.exp, aten.pow, aten.div, aten.mul, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0.run(arg0_1, arg1_1, buf0, buf1, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class BoundedBeta(torch.distributions.Beta):
def log_prob(self, x):
return super().log_prob((x + 1) / 2)
class Beta2(nn.Module):
def __init__(self, action_dim, init_std=0.25, learn_std=False):
super(Beta2, self).__init__()
assert init_std < 0.5, 'Beta distribution has a max std dev of 0.5'
self.action_dim = action_dim
self.logstd = nn.Parameter(torch.ones(1, action_dim) * np.log(
init_std), requires_grad=learn_std)
self.learn_std = learn_std
def forward(self, x):
mean = torch.sigmoid(x)
var = self.logstd.exp().pow(2)
"""
alpha = ((1 - mu) / sigma^2 - 1 / mu) * mu^2
beta = alpha * (1 / mu - 1)
Implemented slightly differently for numerical stability.
"""
alpha = (1 - mean) / var * mean.pow(2) - mean
beta = (1 - mean) / var * mean - 1 - alpha
return alpha, beta
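# Worked check of the stabilized rewrite above (editorial comment; the
# numbers are illustrative assumptions): for mu = 0.5 and sigma^2 = 0.01,
# the textbook formulas give alpha = ((1 - mu)/var - 1/mu) * mu^2
# = (50 - 2) * 0.25 = 12 and beta = alpha * (1/mu - 1) = 12, while the
# expressions used here give (1 - mu)/var * mu^2 - mu = 50 * 0.25 - 0.5 = 12
# and (1 - mu)/var * mu - 1 - alpha = 25 - 13 = 12. Beta(12, 12) indeed has
# mean 0.5 and variance 144 / (24**2 * 25) = 0.01.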
def sample(self, x, deterministic):
if deterministic is False:
action = self.evaluate(x).sample()
else:
return self.evaluate(x).mean
return 2 * action - 1
def evaluate(self, x):
alpha, beta = self(x)
return BoundedBeta(alpha, beta)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'action_dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1.0
tmp3 = tmp2 - tmp1
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 * tmp5
tmp7 = tmp3 / tmp6
tmp8 = tmp1 * tmp1
tmp9 = tmp7 * tmp8
tmp10 = tmp9 - tmp1
tmp11 = tmp7 * tmp1
tmp12 = tmp11 - tmp2
tmp13 = tmp12 - tmp10
tl.store(out_ptr0 + x2, tmp10, xmask)
tl.store(out_ptr1 + x2, tmp13, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0[grid(256)](arg0_1,
arg1_1, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0, buf1
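# Hedged equivalence sketch (editorial addition, not generated code): the
# fused kernel should reproduce Beta2's eager alpha/beta computation. The
# helper name and tolerances are assumptions for illustration.
def _check_beta2_kernel():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    logstd = torch.ones(1, 4, device='cuda') * np.log(0.25)
    alpha, beta = call([x, logstd])
    mean = torch.sigmoid(x)
    var = logstd.exp().pow(2)
    ref_alpha = (1 - mean) / var * mean.pow(2) - mean
    ref_beta = (1 - mean) / var * mean - 1 - ref_alpha
    assert torch.allclose(alpha, ref_alpha, atol=1e-5)
    assert torch.allclose(beta, ref_beta, atol=1e-5)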
class BoundedBeta(torch.distributions.Beta):
def log_prob(self, x):
return super().log_prob((x + 1) / 2)
class Beta2New(nn.Module):
def __init__(self, action_dim, init_std=0.25, learn_std=False):
super(Beta2New, self).__init__()
assert init_std < 0.5, 'Beta distribution has a max std dev of 0.5'
self.action_dim = action_dim
self.logstd = nn.Parameter(torch.ones(1, action_dim) * np.log(
init_std), requires_grad=learn_std)
self.learn_std = learn_std
def sample(self, x, deterministic):
if deterministic is False:
action = self.evaluate(x).sample()
else:
return self.evaluate(x).mean
return 2 * action - 1
def evaluate(self, x):
alpha, beta = self(x)
return BoundedBeta(alpha, beta)
def forward(self, input_0):
arg1_1 = self.logstd
arg0_1 = input_0
output = call([arg0_1, arg1_1])
return output[0], output[1]
| RohanPankaj/apex | Beta2 | false | 998 | [
"MIT"
] | 0 | 74e96386bf9446d1179106d6d65ea0368c1b5b27 | https://github.com/RohanPankaj/apex/tree/74e96386bf9446d1179106d6d65ea0368c1b5b27 | import torch
import numpy as np
import torch.nn as nn
class BoundedBeta(torch.distributions.Beta):
def log_prob(self, x):
return super().log_prob((x + 1) / 2)
class Model(nn.Module):
def __init__(self, action_dim, init_std=0.25, learn_std=False):
super().__init__()
assert init_std < 0.5, 'Beta distribution has a max std dev of 0.5'
self.action_dim = action_dim
self.logstd = nn.Parameter(torch.ones(1, action_dim) * np.log(
init_std), requires_grad=learn_std)
self.learn_std = learn_std
def forward(self, x):
mean = torch.sigmoid(x)
var = self.logstd.exp().pow(2)
"""
alpha = ((1 - mu) / sigma^2 - 1 / mu) * mu^2
beta = alpha * (1 / mu - 1)
Implemented slightly differently for numerical stability.
"""
alpha = (1 - mean) / var * mean.pow(2) - mean
beta = (1 - mean) / var * mean - 1 - alpha
return alpha, beta
def sample(self, x, deterministic):
if deterministic is False:
action = self.evaluate(x).sample()
else:
return self.evaluate(x).mean
return 2 * action - 1
def evaluate(self, x):
alpha, beta = self(x)
return BoundedBeta(alpha, beta)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
LN_Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/xn/cxnnvcbpimtewxjt4mimmcqawz5gv2a6icv2tr6fsnnrymigvs3d.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.relu, aten.native_layer_norm]
# Source node to ATen node mapping:
# x => relu
# x_1 => add, rsqrt, var_mean
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%relu, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_relu_0 = async_compile.triton('triton_poi_fused_native_layer_norm_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + (x0), tmp13, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/6g/c6ggq4jcdiezt4q7ougkmhavz4u3ubava2hzv5rz7w6i6lfww67j.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.relu, aten.native_layer_norm]
# Source node to ATen node mapping:
# x => relu
# x_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%relu, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {})
triton_poi_fused_native_layer_norm_relu_1 = async_compile.triton('triton_poi_fused_native_layer_norm_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ns/cnszijuiz432ctw37rqktvk3syr2vugzeuatmva3neoizic6f3sq.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_4 => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_2 = async_compile.triton('triton_poi_fused_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.relu, aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_relu_0.run(buf0, buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_1.run(buf0, buf1, buf2, primals_4, primals_5, buf3, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = buf2; del buf2 # reuse
buf6 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_0.run(buf4, buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_1.run(buf4, buf5, buf6, primals_8, primals_9, buf7, 256, grid=grid(256), stream=stream0)
del buf5
del buf6
del primals_9
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf8)
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.tanh]
triton_poi_fused_tanh_2.run(buf9, primals_11, 256, grid=grid(256), stream=stream0)
del primals_11
return (buf9, primals_4, primals_8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf9, primals_10, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LN_Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action, hidden_size1,
hidden_size2):
super(LN_Actor, self).__init__()
self.l1 = nn.Linear(state_dim, hidden_size1)
self.ln1 = nn.LayerNorm(hidden_size1)
self.l2 = nn.Linear(hidden_size1, hidden_size2)
self.ln2 = nn.LayerNorm(hidden_size2)
self.l3 = nn.Linear(hidden_size2, action_dim)
def forward(self, x):
x = F.relu(self.l1(x))
x = self.ln1(x)
x = F.relu(self.l2(x))
x = self.ln2(x)
x = torch.tanh(self.l3(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'max_action': 4,
'hidden_size1': 4, 'hidden_size2': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
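# Editorial note (not generated comments): of the three fused kernels below,
# the first applies relu to each row of 4 activations and emits the per-row
# mean and rsqrt(var + 1e-05) that LayerNorm needs; the second recomputes
# relu(x) and finishes the normalization as (relu(x) - mean) * rsqrt *
# weight + bias; the third adds the last linear layer's bias and applies
# tanh in place.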
@triton.jit
def triton_poi_fused_native_layer_norm_relu_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_relu_0[grid(64)](buf0, buf1,
buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_relu_1[grid(256)](buf0, buf1,
buf2, primals_4, primals_5, buf3, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = buf2
del buf2
buf6 = buf1
del buf1
triton_poi_fused_native_layer_norm_relu_0[grid(64)](buf4, buf5,
buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_relu_1[grid(256)](buf4, buf5,
buf6, primals_8, primals_9, buf7, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf5
del buf6
del primals_9
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf8)
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused_tanh_2[grid(256)](buf9, primals_11, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_11
return buf9, primals_4, primals_8, reinterpret_tensor(primals_3, (64, 4
), (4, 1), 0), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0
), buf4, reinterpret_tensor(buf7, (64, 4), (4, 1), 0
), buf9, primals_10, primals_6
class LN_ActorNew(nn.Module):
def __init__(self, state_dim, action_dim, max_action, hidden_size1,
hidden_size2):
super(LN_ActorNew, self).__init__()
self.l1 = nn.Linear(state_dim, hidden_size1)
self.ln1 = nn.LayerNorm(hidden_size1)
self.l2 = nn.Linear(hidden_size1, hidden_size2)
self.ln2 = nn.LayerNorm(hidden_size2)
self.l3 = nn.Linear(hidden_size2, action_dim)
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.ln1.weight
primals_5 = self.ln1.bias
primals_6 = self.l2.weight
primals_7 = self.l2.bias
primals_8 = self.ln2.weight
primals_9 = self.ln2.bias
primals_10 = self.l3.weight
primals_11 = self.l3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| RohanPankaj/apex | LN_Actor | false | 999 | [
"MIT"
] | 0 | 74e96386bf9446d1179106d6d65ea0368c1b5b27 | https://github.com/RohanPankaj/apex/tree/74e96386bf9446d1179106d6d65ea0368c1b5b27 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, state_dim, action_dim, max_action, hidden_size1,
hidden_size2):
super().__init__()
self.l1 = nn.Linear(state_dim, hidden_size1)
self.ln1 = nn.LayerNorm(hidden_size1)
self.l2 = nn.Linear(hidden_size1, hidden_size2)
self.ln2 = nn.LayerNorm(hidden_size2)
self.l3 = nn.Linear(hidden_size2, action_dim)
def forward(self, x):
x = F.relu(self.l1(x))
x = self.ln1(x)
x = F.relu(self.l2(x))
x = self.ln2(x)
x = torch.tanh(self.l3(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'max_action': 4,
'hidden_size1': 4, 'hidden_size2': 4}]
|
CFReLU | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ij/cijpjgmyxrhgzewma6oaiqf3rbibzuwqb6fihby3gaafnkiscy46.py
# Topologically Sorted Source Nodes: [add, relu, add_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# relu => relu
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %primals_3), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp6 = tl.load(in_ptr2 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp5 + tmp8
tmp10 = 0.0
tmp11 = tmp5 <= tmp10
tl.store(out_ptr0 + (x0), tmp9, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [add, relu, add_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0.run(primals_2, primals_1, primals_3, buf0, buf1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
del primals_3
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((), (), device='cuda:0', dtype=torch.int64)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CFReLU(nn.Module):
"""
Custom FReLU
cfrelu(x) = relu(x + a) + b
see psychXRF.activation.FReLU
Shape:
-Input: (N, *)
-Output: (N, *), same shape as the input
Parameters:
-a: trainable parameter
-b: fixed parameter
Examples:
>>> input = torch.randn(300, 6)
>>> act = CFReLU(input.shape[1], b = 1.0e-6)
>>> x = act(input)
"""
def __init__(self, in_features, b, a=None):
"""
Initialization
a is initialized with zero value by default
"""
super(CFReLU, self).__init__()
self.relu = F.relu
self.in_features = in_features
self.b = nn.Parameter(torch.tensor(b), requires_grad=False)
if a:
self.a = nn.Parameter(torch.tensor(a))
else:
self.a = nn.Parameter(torch.tensor(0.0))
self.a.requires_grad = True
def forward(self, x):
return self.relu(x + self.a) + self.b
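# Illustrative values (editorial comment; the inputs are assumptions): with
# the default a = 0.0 and b = 1.0e-6, cfrelu(-0.5) = relu(-0.5) + 1e-6 = 1e-6
# and cfrelu(0.5) = 0.5 + 1e-6, i.e. a ReLU whose floor is lifted by b.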
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'b': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp5 + tmp8
tmp10 = 0.0
tmp11 = tmp5 <= tmp10
tl.store(out_ptr0 + x0, tmp9, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
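# Editorial note: besides the activation output (out_ptr0), the kernel above
# also materializes the boolean mask relu(x + a) <= 0 (out_ptr1), which
# autograd's threshold_backward uses to zero gradients through the ReLU.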
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (), ())
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_relu_threshold_backward_0[grid(256)](primals_2,
primals_1, primals_3, buf0, buf1, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_1
del primals_2
del primals_3
return buf0, buf1
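# Hedged sanity sketch (editorial addition, not generated code): buf0 should
# match eager relu(x + a) + b and buf1 the backward mask. The helper name and
# inputs are assumptions for illustration.
def _check_cfrelu_kernel():
    a = torch.tensor(0.0, device='cuda')
    x = torch.randn(4, 4, 4, 4, device='cuda')
    b = torch.tensor(4, device='cuda')  # 0-dim int64, matching call()'s guards
    out, mask = call([a, x, b])
    assert torch.allclose(out, torch.relu(x + a) + b)
    assert torch.equal(mask, torch.relu(x + a) <= 0)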
class CFReLUNew(nn.Module):
"""
Custom FReLU
cfrelu(x) = relu(x + a) + b
see psychXRF.activation.FReLU
Shape:
-Input: (N, *)
-Output: (N, *), same shape as the input
Parameters:
-a: trainable parameter
-b: fixed parameter
Examples:
>>> input = torch.randn(300, 6)
>>> act = CFReLU(input.shape[1], b = 1.0e-6)
>>> x = act(input)
"""
def __init__(self, in_features, b, a=None):
"""
Initialization
a is initialized with zero value by default
"""
super(CFReLUNew, self).__init__()
self.relu = F.relu
self.in_features = in_features
self.b = nn.Parameter(torch.tensor(b), requires_grad=False)
if a:
self.a = nn.Parameter(torch.tensor(a))
else:
self.a = nn.Parameter(torch.tensor(0.0))
self.a.requires_grad = True
def forward(self, input_0):
primals_1 = self.a
primals_3 = self.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| RosarioAndolina/psychXRF | CFReLU | false | 1,000 | [
"MIT"
] | 0 | e2adadbd17664d7f74c10304f84b3751c571226e | https://github.com/RosarioAndolina/psychXRF/tree/e2adadbd17664d7f74c10304f84b3751c571226e | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
Custom FReLU
cfrelu(x) = relu(x + a) + b
see psychXRF.activation.FReLU
Shape:
-Input: (N, *)
-Output: (N, *), same shape as the input
Parameters:
-a: trainable parameter
-b: fixed parameter
Examples:
>>> input = torch.randn(300, 6)
>>> act = CFReLU(input.shape[1], b = 1.0e-6)
>>> x = act(input)
"""
def __init__(self, in_features, b, a=None):
"""
Initialization
a is initialized with zero value by default
"""
super().__init__()
self.relu = F.relu
self.in_features = in_features
self.b = nn.Parameter(torch.tensor(b), requires_grad=False)
if a:
self.a = nn.Parameter(torch.tensor(a))
else:
self.a = nn.Parameter(torch.tensor(0.0))
self.a.requires_grad = True
def forward(self, x):
return self.relu(x + self.a) + self.b
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
LN_DDPGCritic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [xu], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# xu => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
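    # Columns 0-3 of each output row copy from the state tensor (in_ptr0)
    # and columns 4-7 from the action tensor (in_ptr1), implementing
    # torch.cat([x, u], dim=1) for the critic input xu.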
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/yv/cyvyo4ngtqx5yqi3aouvj5rpeudtztqrqbcgywvc5tyw4we3zr4n.py
# Topologically Sorted Source Nodes: [x1, x1_1], Original ATen: [aten.relu, aten.native_layer_norm]
# Source node to ATen node mapping:
# x1 => relu
# x1_1 => add, rsqrt, var_mean
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%addmm,), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%relu, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_relu_1 = async_compile.triton('triton_poi_fused_native_layer_norm_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
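    # Per-row LayerNorm statistics of relu(x) over 4 features: out_ptr0 receives
    # the mean, out_ptr1 receives rsqrt(var + eps) with the biased (correction=0)
    # variance, matching aten.var_mean in the graph fragment above.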
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + (x0), tmp13, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/n3/cn3ucymmjowa2gnsrhjc57k3cvwek7mc56s6tp6mjfw5s53i4pqk.py
# Topologically Sorted Source Nodes: [x1, x1_1], Original ATen: [aten.relu, aten.native_layer_norm]
# Source node to ATen node mapping:
# x1 => relu
# x1_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%addmm,), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%relu, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_5), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_6), kwargs = {})
triton_poi_fused_native_layer_norm_relu_2 = async_compile.triton('triton_poi_fused_native_layer_norm_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
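    # Apply the normalization: (relu(x) - mean) * rsqrt(var + eps) * gamma + beta,
    # with mean/rsqrt indexed per row (x1) and gamma/beta per feature (x0).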
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (1, 4), (4, 1))
assert_size_stride(primals_12, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
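        # Forward pipeline: cat(state, action) -> l1 (addmm) -> ReLU + LayerNorm
        # (stats kernel, then affine kernel) -> l2 -> ReLU + LayerNorm -> l3,
        # producing one scalar Q-value per row in buf10.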
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [xu], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [x1, x1_1], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_1.run(buf1, buf2, buf3, 4, grid=grid(4), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x1, x1_1], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_2.run(buf1, buf2, buf3, primals_5, primals_6, buf4, 16, grid=grid(16), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = buf3; del buf3 # reuse
buf7 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x1_2, x1_3], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_1.run(buf5, buf6, buf7, 4, grid=grid(4), stream=stream0)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x1_2, x1_3], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_2.run(buf5, buf6, buf7, primals_9, primals_10, buf8, 16, grid=grid(16), stream=stream0)
del buf6
del primals_10
buf10 = reinterpret_tensor(buf7, (4, 1), (1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [x1_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, buf8, reinterpret_tensor(primals_11, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_12
return (buf10, primals_5, primals_9, buf0, buf1, buf4, buf5, buf8, primals_11, primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LN_DDPGCritic(nn.Module):
def __init__(self, state_dim, action_dim, hidden_size1, hidden_size2):
super(LN_DDPGCritic, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, hidden_size1)
self.ln1 = nn.LayerNorm(hidden_size1)
self.l2 = nn.Linear(hidden_size1, hidden_size2)
self.ln2 = nn.LayerNorm(hidden_size2)
self.l3 = nn.Linear(hidden_size2, 1)
def forward(self, inputs, actions):
xu = torch.cat([inputs, actions], 1)
x1 = F.relu(self.l1(xu))
x1 = self.ln1(x1)
x1 = F.relu(self.l2(x1))
x1 = self.ln2(x1)
x1 = self.l3(x1)
return x1
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'hidden_size1': 4,
'hidden_size2': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (1, 4), (4, 1))
assert_size_stride(primals_12, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_native_layer_norm_relu_1[grid(4)](buf1, buf2, buf3,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_relu_2[grid(16)](buf1, buf2,
buf3, primals_5, primals_6, buf4, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = buf3
del buf3
buf7 = buf2
del buf2
triton_poi_fused_native_layer_norm_relu_1[grid(4)](buf5, buf6, buf7,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_relu_2[grid(16)](buf5, buf6,
buf7, primals_9, primals_10, buf8, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf6
del primals_10
buf10 = reinterpret_tensor(buf7, (4, 1), (1, 1), 0)
del buf7
extern_kernels.addmm(primals_12, buf8, reinterpret_tensor(
primals_11, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_12
return (buf10, primals_5, primals_9, buf0, buf1, buf4, buf5, buf8,
primals_11, primals_7)
class LN_DDPGCriticNew(nn.Module):
def __init__(self, state_dim, action_dim, hidden_size1, hidden_size2):
super(LN_DDPGCriticNew, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, hidden_size1)
self.ln1 = nn.LayerNorm(hidden_size1)
self.l2 = nn.Linear(hidden_size1, hidden_size2)
self.ln2 = nn.LayerNorm(hidden_size2)
self.l3 = nn.Linear(hidden_size2, 1)
def forward(self, input_0, input_1):
primals_3 = self.l1.weight
primals_4 = self.l1.bias
primals_5 = self.ln1.weight
primals_6 = self.ln1.bias
        primals_7 = self.l2.weight

primals_8 = self.l2.bias
primals_9 = self.ln2.weight
primals_10 = self.ln2.bias
primals_11 = self.l3.weight
primals_12 = self.l3.bias
        primals_1 = input_0
        primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| RohanPankaj/apex | LN_DDPGCritic | false | 1,001 | [
"MIT"
] | 0 | 74e96386bf9446d1179106d6d65ea0368c1b5b27 | https://github.com/RohanPankaj/apex/tree/74e96386bf9446d1179106d6d65ea0368c1b5b27 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, state_dim, action_dim, hidden_size1, hidden_size2):
super().__init__()
self.l1 = nn.Linear(state_dim + action_dim, hidden_size1)
self.ln1 = nn.LayerNorm(hidden_size1)
self.l2 = nn.Linear(hidden_size1, hidden_size2)
self.ln2 = nn.LayerNorm(hidden_size2)
self.l3 = nn.Linear(hidden_size2, 1)
def forward(self, inputs, actions):
xu = torch.cat([inputs, actions], 1)
x1 = F.relu(self.l1(xu))
x1 = self.ln1(x1)
x1 = F.relu(self.l2(x1))
x1 = self.ln2(x1)
x1 = self.l3(x1)
return x1
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'hidden_size1': 4,
'hidden_size2': 4}]
|
ReQUNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/be/cbe2chpvmm7us6roxewr7w2th6vorbi5luzhicrdhynmx5fq62e3.py
# Topologically Sorted Source Nodes: [lt, setitem, z], Original ATen: [aten.lt, aten.lift_fresh, aten.index_put, aten.mul]
# Source node to ATen node mapping:
# lt => lt
# setitem => full_default, index_put
# z => mul
# Graph fragment:
# %lt : [num_users=2] = call_function[target=torch.ops.aten.lt.Scalar](args = (%view_1, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%view_1, [%lt], %full_default), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %view_3), kwargs = {})
triton_poi_fused_index_put_lift_fresh_lt_mul_0 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_lt_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_lt_mul_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr2'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_lt_mul_0(in_ptr0, out_ptr0, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
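    # Fused ReQU: clamp negatives to zero, then square. out_ptr0 keeps the
    # boolean (x < 0) mask for backward, out_ptr2 aliases the input buffer so
    # the clamp happens in place, and out_ptr3 receives the squared result.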
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = 0.0
tmp2 = tmp0 < tmp1
tmp3 = tl.where(tmp2, tmp1, tmp0)
tmp4 = tmp3 * tmp3
tl.store(out_ptr0 + (x0), tmp2, None)
tl.store(out_ptr2 + (x0), tmp3, None)
tl.store(out_ptr3 + (x0), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/tz/ctzengiku4fpyacmhgujhvarriu4wwirpgay5u6a5wsrq2v75w32.py
# Topologically Sorted Source Nodes: [pred_for_acc], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# pred_for_acc => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_6, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_6, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 12
x2 = (xindex // 48)
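    # Numerically stable softmax, step 1: for each (batch, spatial, class)
    # position, take the max over dim 1 (the four slices of stride 12 within
    # each 48-element batch block), subtract it, and exponentiate.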
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (12 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (24 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (36 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/bv/cbvhs2xxondnsndwd2fomugf22ux53yglufxq4ntkf3shlcw366c.py
# Topologically Sorted Source Nodes: [pred_for_acc], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# pred_for_acc => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 12
x2 = (xindex // 48)
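    # Numerically stable softmax, step 2: normalize each exponential by the
    # sum of exponentials over the same dim-1 slices.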
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (12 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (24 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (36 + x0 + (48*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (3, 64), (64, 1))
assert_size_stride(primals_5, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [lt, setitem, z], Original ATen: [aten.lt, aten.lift_fresh, aten.index_put, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_lt_mul_0.run(buf0, buf1, buf0, buf4, 4096, grid=grid(4096), stream=stream0)
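        # buf0 is passed as both the input and out_ptr2, so the clamp
        # (x[x < 0] = 0) happens in place, mirroring MyReQU; buf1 stores the
        # boolean mask and buf4 the squared activations.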
buf5 = empty_strided_cuda((64, 3), (3, 1), torch.float32)
# Topologically Sorted Source Nodes: [pred], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf4, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 3), (1, 64), 0), alpha=1, beta=1, out=buf5)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [pred_for_acc], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf5, buf6, 192, grid=grid(192), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [pred_for_acc], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf6, buf7, 192, grid=grid(192), stream=stream0)
del buf6
return (reinterpret_tensor(buf5, (4, 4, 4, 3), (48, 12, 3, 1), 0), buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0), reinterpret_tensor(buf4, (64, 64), (64, 1), 0), buf7, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((3, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def MyReQU(x):
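    # ReQU activation: clamp negatives to zero (note: mutates x in place),
    # then square, i.e. max(x, 0) ** 2.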
x[x < 0] = 0
z = x * x
return z
class ReQUNet(nn.Module):
def __init__(self):
super(ReQUNet, self).__init__()
n_in, n_h, n_out = 4, 64, 3
self.fc1 = nn.Linear(n_in, n_h, True)
self.fc2 = nn.Linear(n_h, n_out, True)
def forward(self, x):
h = MyReQU(self.fc1(x))
pred = self.fc2(h)
        soft = nn.Softmax(dim=1)
pred_for_acc = soft(pred)
return pred, pred_for_acc
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_index_put_lift_fresh_lt_mul_0(in_ptr0, out_ptr0,
out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.0
tmp2 = tmp0 < tmp1
tmp3 = tl.where(tmp2, tmp1, tmp0)
tmp4 = tmp3 * tmp3
tl.store(out_ptr0 + x0, tmp2, None)
tl.store(out_ptr2 + x0, tmp3, None)
tl.store(out_ptr3 + x0, tmp4, None)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 12
x2 = xindex // 48
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (12 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (24 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (36 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 12
x2 = xindex // 48
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (12 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (24 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (36 + x0 + 48 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (3, 64), (64, 1))
assert_size_stride(primals_5, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
buf4 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_lt_mul_0[grid(4096)](buf0,
buf1, buf0, buf4, 4096, XBLOCK=256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 3), (3, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf4, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_4, (64, 3), (1, 64), 0),
alpha=1, beta=1, out=buf5)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
triton_poi_fused__softmax_1[grid(192)](buf5, buf6, 192, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
triton_poi_fused__softmax_2[grid(192)](buf6, buf7, 192, XBLOCK=256,
num_warps=4, num_stages=1)
del buf6
return reinterpret_tensor(buf5, (4, 4, 4, 3), (48, 12, 3, 1), 0
), buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0
), reinterpret_tensor(buf4, (64, 64), (64, 1), 0), buf7, primals_4
def MyReQU(x):
x[x < 0] = 0
z = x * x
return z
class ReQUNetNew(nn.Module):
def __init__(self):
super(ReQUNetNew, self).__init__()
n_in, n_h, n_out = 4, 64, 3
self.fc1 = nn.Linear(n_in, n_h, True)
self.fc2 = nn.Linear(n_h, n_out, True)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1]
| RoyHirsch/DeepLearningCourse | ReQUNet | false | 1,002 | [
"MIT"
] | 0 | 9036c0fdbb08b610524d7be991f8e4b490a82c6c | https://github.com/RoyHirsch/DeepLearningCourse/tree/9036c0fdbb08b610524d7be991f8e4b490a82c6c | import torch
import torch.nn as nn
def MyReQU(x):
x[x < 0] = 0
z = x * x
return z
class Model(nn.Module):
def __init__(self):
super().__init__()
n_in, n_h, n_out = 4, 64, 3
self.fc1 = nn.Linear(n_in, n_h, True)
self.fc2 = nn.Linear(n_h, n_out, True)
def forward(self, x):
h = MyReQU(self.fc1(x))
pred = self.fc2(h)
        soft = nn.Softmax(dim=1)
pred_for_acc = soft(pred)
return pred, pred_for_acc
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SumNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/dz/cdzryvtc5xcfzqqm63c556qj4lnbgf74xepjuv4ftcjjramnhyrm.py
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
# Source node to ATen node mapping:
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %view), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
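    # out[x1, x0] = x[x1, x0] / sum(x[x1, :]); the 4-element row sum is
    # recomputed per output element, which is cheap at this size.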
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SumNorm(nn.Module):
"""
    Normalize by dividing by the sum
Shape:
-Input: (N, *)
-Output: (N, *), same shape as the input
Parameters:
-in_features: number of input features
    -dim(int): the dimension along which the sum will be computed
Examples:
>>> input = torch.randn(300, 4)
>>> afunc = SumNorm(input.shape[1], dim = 1)
>>> x = afunc(input)
"""
def __init__(self, in_features, dim=1):
super(SumNorm, self).__init__()
self.in_features = in_features
self.dim = dim
def forward(self, x):
return x / x.sum(dim=self.dim).view(x.shape[0], 1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class SumNormNew(nn.Module):
"""
    Normalize by dividing by the sum
Shape:
-Input: (N, *)
-Output: (N, *), same shape as the input
Parameters:
-in_features: number of input features
    -dim(int): the dimension along which the sum will be computed
Examples:
>>> input = torch.randn(300, 4)
>>> afunc = SumNorm(input.shape[1], dim = 1)
>>> x = afunc(input)
"""
def __init__(self, in_features, dim=1):
super(SumNormNew, self).__init__()
self.in_features = in_features
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| RosarioAndolina/psychXRF | SumNorm | false | 1,003 | [
"MIT"
] | 0 | e2adadbd17664d7f74c10304f84b3751c571226e | https://github.com/RosarioAndolina/psychXRF/tree/e2adadbd17664d7f74c10304f84b3751c571226e | import torch
import torch.nn as nn
class Model(nn.Module):
"""
    Normalize by dividing by the sum
Shape:
-Input: (N, *)
-Output: (N, *), same shape as the input
Parameters:
-in_features: number of input features
    -dim(int): the dimension along which the sum will be computed
Examples:
>>> input = torch.randn(300, 4)
>>> afunc = SumNorm(input.shape[1], dim = 1)
>>> x = afunc(input)
"""
def __init__(self, in_features, dim=1):
super().__init__()
self.in_features = in_features
self.dim = dim
def forward(self, x):
return x / x.sum(dim=self.dim).view(x.shape[0], 1)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
PITF_Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/yo/cyo5c3qpjjeovvnksg7l66uxw5vj6kk2cjxmo4mrnxkgs355wopn.py
# Topologically Sorted Source Nodes: [sub, sigmoid, log, neg], Original ATen: [aten.sub, aten.sigmoid, aten.log, aten.neg]
# Source node to ATen node mapping:
# log => log
# neg => neg
# sigmoid => sigmoid
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%sub,), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sigmoid,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {})
triton_poi_fused_log_neg_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_log_neg_sigmoid_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_log_neg_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_log_neg_sigmoid_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
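    # Elementwise BPR-style pairwise loss: -log(sigmoid(r_p - r_ne)). Note
    # that for large negative differences log(sigmoid(x)) can underflow to
    # -inf; logsigmoid is the numerically safer formulation if that matters.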
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tl_math.log(tmp3)
tmp5 = -tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, sigmoid, log, neg], Original ATen: [aten.sub, aten.sigmoid, aten.log, aten.neg]
stream0 = get_raw_stream(0)
triton_poi_fused_log_neg_sigmoid_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch as t
import torch.nn as nn
class PITF_Loss(nn.Module):
"""
    Defines the PITF loss function.
"""
def __init__(self):
super(PITF_Loss, self).__init__()
def forward(self, r_p, r_ne):
return -t.log(t.sigmoid(r_p - r_ne))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_log_neg_sigmoid_sub_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 - tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tl_math.log(tmp3)
tmp5 = -tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_log_neg_sigmoid_sub_0[grid(256)](arg0_1, arg1_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class PITF_LossNew(nn.Module):
"""
    Defines the PITF loss function.
"""
def __init__(self):
super(PITF_LossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| SamHaoYuan/pitf | PITF_Loss | false | 1,004 | [
"MIT"
] | 0 | 5fdebc3b44c6462126876101b052a3980804da79 | https://github.com/SamHaoYuan/pitf/tree/5fdebc3b44c6462126876101b052a3980804da79 | import torch
import torch as t
import torch.nn as nn
class Model(nn.Module):
"""
    Defines the PITF loss function.
"""
def __init__(self):
super().__init__()
def forward(self, r_p, r_ne):
return -t.log(t.sigmoid(r_p - r_ne))
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SpatialGather_Module | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/iv/civr7hz7pwb7nd5q352sqsjvxezkx6m6jnyztaygkt2ugewh5ejx.py
# Topologically Sorted Source Nodes: [probs_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# probs_1 => div, exp, sum_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp9 / tmp13
tl.store(out_ptr2 + (r1 + (16*x0)), tmp14, xmask)
''', device_str='cuda')
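# Kernel note (editorial sketch): subtracting the row max (tmp6) before exp
# is the standard numerically stable softmax; out-of-bounds lanes are filled
# with -inf ahead of the max and with 0 ahead of the sum, so masking never
# perturbs either reduction.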
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [probs_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_per_fused__softmax_0.run(arg0_1, buf2, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [probs_1, matmul], Original ATen: [aten._softmax, aten.bmm]
extern_kernels.bmm(buf2, reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), out=buf3)
del arg1_1
del buf2
return (reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 1, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import functional as F
import torch._utils
import torch.optim
class SpatialGather_Module(nn.Module):
"""
    Aggregate the context features according to the initial
    predicted probability distribution, using a soft-weighted
    sum over the spatial positions.
"""
def __init__(self, cls_num=0, scale=1):
super(SpatialGather_Module, self).__init__()
self.cls_num = cls_num
self.scale = scale
def forward(self, feats, probs):
        batch_size, c, _h, _w = probs.size()
probs = probs.view(batch_size, c, -1)
feats = feats.view(batch_size, feats.size(1), -1)
feats = feats.permute(0, 2, 1)
probs = F.softmax(self.scale * probs, dim=2)
ocr_context = torch.matmul(probs, feats).permute(0, 2, 1).unsqueeze(3)
return ocr_context
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
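# Equivalence sketch (editorial, not part of the original module; tolerance
# chosen loosely for float32): the gather is softmax over flattened spatial
# positions followed by a batched matmul, i.e. an einsum over the pixel axis.
m = SpatialGather_Module()
feats = torch.rand(4, 4, 4, 4)
probs = torch.rand(4, 4, 4, 4)
ocr = m(feats, probs)  # (B, C_feat, K, 1): one context vector per class
p = F.softmax(probs.flatten(2), dim=2)
ref = torch.einsum('bkn,bcn->bck', p, feats.flatten(2)).unsqueeze(3)
assert torch.allclose(ocr, ref, atol=1e-6)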
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch._utils
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp9 / tmp13
tl.store(out_ptr2 + (r1 + 16 * x0), tmp14, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=
8, num_warps=2, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(arg1_1, (4, 16, 4), (64,
1, 16), 0), out=buf3)
del arg1_1
del buf2
return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 1, 4, 1), 0),
class SpatialGather_ModuleNew(nn.Module):
"""
    Aggregate the context features according to the initial
    predicted probability distribution, using a soft-weighted
    sum over the spatial positions.
"""
def __init__(self, cls_num=0, scale=1):
super(SpatialGather_ModuleNew, self).__init__()
self.cls_num = cls_num
self.scale = scale
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| SSJIACV/HRNet-Semantic-Segmentation | SpatialGather_Module | false | 1,005 | [
"MIT"
] | 0 | 7e2840ce7a91ae3845dfb203c992f84affa15e40 | https://github.com/SSJIACV/HRNet-Semantic-Segmentation/tree/7e2840ce7a91ae3845dfb203c992f84affa15e40 | import torch
import torch.nn as nn
from torch.nn import functional as F
import torch._utils
import torch.optim
class Model(nn.Module):
"""
    Aggregate the context features according to the initial
    predicted probability distribution, using a soft-weighted
    sum over the spatial positions.
"""
def __init__(self, cls_num=0, scale=1):
super().__init__()
self.cls_num = cls_num
self.scale = scale
def forward(self, feats, probs):
        batch_size, c, _h, _w = probs.size()
probs = probs.view(batch_size, c, -1)
feats = feats.view(batch_size, feats.size(1), -1)
feats = feats.permute(0, 2, 1)
probs = F.softmax(self.scale * probs, dim=2)
ocr_context = torch.matmul(probs, feats).permute(0, 2, 1).unsqueeze(3)
return ocr_context
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
_boundary | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [residual, residual_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# residual => convolution
# residual_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
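# Indexing note (editorial sketch): x1 = (xindex // 16) % 4 recovers the
# channel from the flat NCHW offset (H * W == 16 here), so the per-channel
# bias in in_ptr0 is broadcast across the 4x4 spatial positions before the
# fused ReLU.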
# kernel path: runs/run_shard_6/inductor_cache/gz/cgz3i5sfiy2r2zcw7on5c3vgziypvqy3tqtrdk4orqs3phgqxlob.py
# Topologically Sorted Source Nodes: [residual_2, out], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out => add
# residual_2 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %convolution_1), kwargs = {})
triton_poi_fused_add_convolution_1 = async_compile.triton('triton_poi_fused_add_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_out_ptr0 + (x3), xmask)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [residual], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [residual, residual_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [residual_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [residual_2, out], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_1.run(buf3, primals_3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class _boundary(nn.Module):
def __init__(self, dim):
super(_boundary, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
def forward(self, x):
residual = self.conv1(x)
residual = self.relu(residual)
residual = self.conv2(residual)
out = x + residual
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
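# Usage sketch (editorial; any (N, dim, H, W) input works, since both 3x3
# convs use padding=1 and preserve spatial size):
# out = x + conv2(relu(conv1(x))), a shape-preserving residual refinement.
block = _boundary(dim=4)
x = torch.rand(2, 4, 8, 8)
assert block(x).shape == x.shape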
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_add_convolution_1[grid(256)](buf3, primals_3,
primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1
class _boundaryNew(nn.Module):
def __init__(self, dim):
super(_boundaryNew, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| STARBOYsachin/semantic-segmentation | _boundary | false | 1,006 | [
"MIT"
] | 0 | 7f553a93b717641edc6c2d463903dfab67267039 | https://github.com/STARBOYsachin/semantic-segmentation/tree/7f553a93b717641edc6c2d463903dfab67267039 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, dim):
super().__init__()
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
def forward(self, x):
residual = self.conv1(x)
residual = self.relu(residual)
residual = self.conv2(residual)
out = x + residual
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
SinglePITF_Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/xb/cxbzd3p6jl5zmocc37mlzv76h2fza5ke5ekhz7h7fnpf3udjofe2.py
# Topologically Sorted Source Nodes: [sigmoid, log, neg, sum_1], Original ATen: [aten.sigmoid, aten.log, aten.neg, aten.sum]
# Source node to ATen node mapping:
# log => log
# neg => neg
# sigmoid => sigmoid
# sum_1 => sum_1
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sigmoid,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%neg,), kwargs = {})
triton_per_fused_log_neg_sigmoid_sum_0 = async_compile.triton('triton_per_fused_log_neg_sigmoid_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_log_neg_sigmoid_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_log_neg_sigmoid_sum_0(in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tl_math.log(tmp1)
tmp3 = -tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
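# Reduction note (editorial sketch): with 4 * 4 * 4 * 4 == 256 elements and
# RBLOCK == 256, a single persistent program reduces the whole tensor, so
# grid(1) launches one instance that performs one sum and one scalar store.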
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, log, neg, sum_1], Original ATen: [aten.sigmoid, aten.log, aten.neg, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_log_neg_sigmoid_sum_0.run(arg0_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch as t
import torch.nn as nn
class SinglePITF_Loss(nn.Module):
"""
    Defines the PITF loss function.
"""
def __init__(self):
super(SinglePITF_Loss, self).__init__()
def forward(self, r):
return t.sum(-t.log(t.sigmoid(r)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_log_neg_sigmoid_sum_0(in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = tl_math.log(tmp1)
tmp3 = -tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_log_neg_sigmoid_sum_0[grid(1)](arg0_1, buf0, 1,
256, num_warps=2, num_stages=1)
del arg0_1
return buf0,
class SinglePITF_LossNew(nn.Module):
"""
    Defines the PITF loss function.
"""
def __init__(self):
super(SinglePITF_LossNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| SamHaoYuan/pitf | SinglePITF_Loss | false | 1,007 | [
"MIT"
] | 0 | 5fdebc3b44c6462126876101b052a3980804da79 | https://github.com/SamHaoYuan/pitf/tree/5fdebc3b44c6462126876101b052a3980804da79 | import torch
import torch as t
import torch.nn as nn
class Model(nn.Module):
"""
    Defines the PITF loss function.
"""
def __init__(self):
super().__init__()
def forward(self, r):
return t.sum(-t.log(t.sigmoid(r)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CAMMNISTClassifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/2x/c2xnlfml7v6uboqk242zwfdxrawephuy6yfg2pq7i4yip73axefe.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ne/cnepmjd66uu3laeexeusfxab3aayptiri2wp2knrgtgmx52tvzxj.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
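# Layout note (editorial sketch): triton_poi_fused_0/_1 repack the 3x3 conv
# weights from contiguous (O, I, 3, 3) into a stride order with the
# input-channel axis innermost (stride 1), i.e. a channels-last-style weight
# layout for the subsequent extern convolutions.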
# kernel path: runs/run_shard_6/inductor_cache/u2/cu2m3b7o6dwplojxah26y2q5h6lbvoedyei4kep3vvnzwe7vkgsn.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 3844
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (3844*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + (32*x2) + (123008*y1)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/tg/ctgpg6u4ktcwk2lkrtfadblsktjvmxihx4s466rrtkg7dn7mcl4f.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = (xindex // 32) % 31
x2 = (xindex // 992)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (3968*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (3968*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (1984 + x0 + (64*x1) + (3968*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (2016 + x0 + (64*x1) + (3968*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
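# Pooling note (editorial sketch): besides the 2x2 window max (tmp6), the
# kernel emits an int8 code in {0, 1, 2, 3} (tmp16) recording which window
# element won, matching the getitem_1 indices that max_pool2d_with_indices
# keeps for the backward pass.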
# kernel path: runs/run_shard_6/inductor_cache/4d/c4dfnf5nxia24xxkghzop4eoinjvrc4vzqjvqrhbghdmzlaefuu3.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 215296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/dk/cdkgbaby3r2xif2evkxgfomw5kmoherisf2ldodrp3aludwtsxk5.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 14
x2 = (xindex // 896) % 14
x3 = (xindex // 12544)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (3712*x2) + (53824*x3)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (3712*x2) + (53824*x3)), xmask)
tmp3 = tl.load(in_ptr0 + (1856 + x0 + (128*x1) + (3712*x2) + (53824*x3)), xmask)
tmp5 = tl.load(in_ptr0 + (1920 + x0 + (128*x1) + (3712*x2) + (53824*x3)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4), tmp6, xmask)
tl.store(out_ptr1 + (x4), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ts/ctsdvxet67xj75o5d443eylzlf7yjmxhn6lm6v4xecbi22dhqpad.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.mean]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# x_3 => mean
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_2, [-1, -2], True), kwargs = {})
triton_red_fused_convolution_mean_relu_6 = async_compile.triton('triton_red_fused_convolution_mean_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1024, 128],
reduction_hint=ReductionHint.OUTER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_convolution_mean_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_convolution_mean_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1024
rnumel = 72
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 128
x1 = (xindex // 128)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
_tmp6 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*r2) + (9216*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = _tmp6 + tmp5
_tmp6 = tl.where(rmask & xmask, tmp7, _tmp6)
tmp6 = tl.sum(_tmp6, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/vi/cvi4ovo32fwdzzp5aadeksyicr3mfbx7eue3p7kl73nprt55exzi.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.mean]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# x_3 => mean
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_2, [-1, -2], True), kwargs = {})
triton_per_fused_convolution_mean_relu_7 = async_compile.triton('triton_per_fused_convolution_mean_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 2],
reduction_hint=ReductionHint.OUTER_TINY,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_mean_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_convolution_mean_relu_7(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 512
rnumel = 2
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 128
x1 = (xindex // 128)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*r2) + (256*x1)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 144.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
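# Reduction note (editorial sketch): global average pooling over the 12x12
# map (144 elements) is split in two stages:
# triton_red_fused_convolution_mean_relu_6 sums two 72-element chunks per
# (batch, channel) into a (4, 128, 1, 1, 2) buffer, and
# triton_per_fused_convolution_mean_relu_7 adds the two partials and divides
# by 144.0 to finish the mean.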
# kernel path: runs/run_shard_6/inductor_cache/6a/c6aydbuigtu7cm4gfypg4ok6wfir5bprkkhebcgwmh2oblbktqaq.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 73728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (10, 128), (128, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_4, buf0, 2048, 9, grid=grid(2048, 9), stream=stream0)
del primals_4
buf1 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_6, buf1, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf3 = empty_strided_cuda((4, 32, 62, 62), (123008, 1, 1984, 32), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf2, primals_2, buf3, 128, 3844, grid=grid(128, 3844), stream=stream0)
del buf2
del primals_2
buf4 = empty_strided_cuda((4, 32, 31, 31), (30752, 1, 992, 32), torch.float32)
buf5 = empty_strided_cuda((4, 32, 31, 31), (30752, 1, 992, 32), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf3, buf4, buf5, 123008, grid=grid(123008), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 29, 29), (53824, 1, 1856, 64))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf7, primals_5, 215296, grid=grid(215296), stream=stream0)
del primals_5
buf8 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.float32)
buf9 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf7, buf8, buf9, 50176, grid=grid(50176), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf8, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 12, 12), (18432, 1, 1536, 128))
buf11 = empty_strided_cuda((4, 128, 1, 1, 2), (256, 1, 1024, 1024, 128), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.mean]
triton_red_fused_convolution_mean_relu_6.run(buf10, primals_7, buf11, 1024, 72, grid=grid(1024), stream=stream0)
buf12 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 512, 512), torch.float32)
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.mean]
triton_per_fused_convolution_mean_relu_7.run(buf13, buf11, 512, 2, grid=grid(512), stream=stream0)
del buf11
buf14 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf13, (4, 128), (128, 1), 0), reinterpret_tensor(primals_8, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf14)
del primals_9
buf15 = empty_strided_cuda((4, 128, 12, 12), (18432, 1, 1536, 128), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_8.run(buf10, primals_7, buf15, 73728, grid=grid(73728), stream=stream0)
del buf10
del primals_7
return (buf14, primals_1, primals_3, buf0, buf1, buf3, buf4, buf5, buf7, buf8, buf9, reinterpret_tensor(buf13, (4, 128), (128, 1), 0), primals_8, buf15, )
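# Note: triton_poi_fused_0/1 above only repack the conv weights into a
# channels-last-like layout; buf0 is (64, 32, 3, 3) with strides
# (288, 1, 96, 32), i.e. memory order (out, kh, kw, in), so downstream kernels
# can index input channels with stride 1. Eager-mode sketch of an equivalent
# layout change (illustrative only; hypothetical helper, never called):
def _sketch_weight_repack():
    import torch
    w = torch.randn(64, 32, 3, 3)                          # primals_4 layout
    w_cl = w.permute(0, 2, 3, 1).contiguous().permute(0, 3, 1, 2)
    assert w_cl.stride() == (288, 1, 96, 32)               # matches buf0
    assert torch.equal(w_cl, w)                            # same values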
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn as nn
from torch import optim as optim
from torchvision import transforms as transforms
class CAMMNISTClassifier(nn.Module):
def __init__(self):
super(CAMMNISTClassifier, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
self.relu3 = nn.ReLU()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(128, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
x = self.relu3(self.conv3(x))
x = self.avgpool(x)
x = x.view(x.shape[0], -1)
x = self.fc(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
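# Shape bookkeeping behind the constants hard-coded in the compiled kernels
# above, for a (4, 1, 64, 64) input (illustrative summary):
#   conv1 (3x3): 64 -> 62 (62*62 = 3844), pool1 (2x2): 62 -> 31,
#   conv2 (3x3): 31 -> 29 (4*64*29*29 = 215296), pool2 (2x2): 29 -> 14,
#   conv3 (3x3): 14 -> 12 (4*128*12*12 = 73728, mean divisor 12*12 = 144).
# Quick check (hypothetical helper, never called):
def _sketch_shapes():
    import torch
    with torch.no_grad():
        assert CAMMNISTClassifier()(torch.rand(4, 1, 64, 64)).shape == (4, 10)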
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn as nn
from torch import optim as optim
from torchvision import transforms as transforms
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 3844
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 3844 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 32 * x2 + 123008 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = xindex // 32 % 31
x2 = xindex // 992
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 3968 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 3968 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (1984 + x0 + 64 * x1 + 3968 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (2016 + x0 + 64 * x1 + 3968 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 215296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 14
x2 = xindex // 896 % 14
x3 = xindex // 12544
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 3712 * x2 + 53824 * x3), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 3712 * x2 + 53824 * x3),
xmask)
tmp3 = tl.load(in_ptr0 + (1856 + x0 + 128 * x1 + 3712 * x2 + 53824 * x3
), xmask)
tmp5 = tl.load(in_ptr0 + (1920 + x0 + 128 * x1 + 3712 * x2 + 53824 * x3
), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x4, tmp6, xmask)
tl.store(out_ptr1 + x4, tmp16, xmask)
@triton.jit
def triton_red_fused_convolution_mean_relu_6(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 72
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 128
x1 = xindex // 128
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
_tmp6 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * r2 + 9216 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = _tmp6 + tmp5
_tmp6 = tl.where(rmask & xmask, tmp7, _tmp6)
tmp6 = tl.sum(_tmp6, 1)[:, None]
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_per_fused_convolution_mean_relu_7(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 512
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 128
x1 = xindex // 128
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * r2 + 256 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 144.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (10, 128), (128, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 9)](primals_4, buf0, 2048, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_1[grid(8192, 9)](primals_6, buf1, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf2 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf3 = empty_strided_cuda((4, 32, 62, 62), (123008, 1, 1984, 32),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(128, 3844)](buf2,
primals_2, buf3, 128, 3844, XBLOCK=32, YBLOCK=32, num_warps=4,
num_stages=1)
del buf2
del primals_2
buf4 = empty_strided_cuda((4, 32, 31, 31), (30752, 1, 992, 32),
torch.float32)
buf5 = empty_strided_cuda((4, 32, 31, 31), (30752, 1, 992, 32),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(123008)](buf3, buf4,
buf5, 123008, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 29, 29), (53824, 1, 1856, 64))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_4[grid(215296)](buf7, primals_5,
215296, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf8 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64),
torch.float32)
buf9 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(50176)](buf7, buf8,
buf9, 50176, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf8, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 12, 12), (18432, 1, 1536, 128))
buf11 = empty_strided_cuda((4, 128, 1, 1, 2), (256, 1, 1024, 1024,
128), torch.float32)
triton_red_fused_convolution_mean_relu_6[grid(1024)](buf10,
primals_7, buf11, 1024, 72, XBLOCK=64, RBLOCK=8, num_warps=4,
num_stages=1)
buf12 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 512, 512),
torch.float32)
buf13 = buf12
del buf12
triton_per_fused_convolution_mean_relu_7[grid(512)](buf13, buf11,
512, 2, XBLOCK=256, num_warps=4, num_stages=1)
del buf11
buf14 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf13, (4, 128),
(128, 1), 0), reinterpret_tensor(primals_8, (128, 10), (1, 128),
0), alpha=1, beta=1, out=buf14)
del primals_9
buf15 = empty_strided_cuda((4, 128, 12, 12), (18432, 1, 1536, 128),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(73728)](
buf10, primals_7, buf15, 73728, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf10
del primals_7
return (buf14, primals_1, primals_3, buf0, buf1, buf3, buf4, buf5, buf7,
buf8, buf9, reinterpret_tensor(buf13, (4, 128), (128, 1), 0),
primals_8, buf15)
class CAMMNISTClassifierNew(nn.Module):
def __init__(self):
super(CAMMNISTClassifierNew, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
self.relu3 = nn.ReLU()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(128, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc.weight
primals_9 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| RobinMaas95/GTSRB_Visualization | CAMMNISTClassifier | false | 1,008 | [
"MIT"
] | 0 | fa837ff94e089a936ef4f4418970d262b35f70b6 | https://github.com/RobinMaas95/GTSRB_Visualization/tree/fa837ff94e089a936ef4f4418970d262b35f70b6 | import torch
from torch import nn as nn
from torch import optim as optim
from torchvision import transforms as transforms
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
self.relu3 = nn.ReLU()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(128, 10)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
x = self.pool2(self.relu2(self.conv2(x)))
x = self.relu3(self.conv3(x))
x = self.avgpool(x)
x = x.view(x.shape[0], -1)
x = self.fc(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
Standardize | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/kr/ckruyt6maca4ngviwzgzhxi4vkkf6jukusb63cym7d2bppobecfc.py
# Topologically Sorted Source Nodes: [x, add, x_1], Original ATen: [aten.sub, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# x => sub
# x_1 => div
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
triton_poi_fused_add_div_sub_0 = async_compile.triton('triton_poi_fused_add_div_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1e-06
tmp5 = tmp3 + tmp4
tmp6 = tmp2 / tmp5
tl.store(out_ptr0 + (x2), tmp2, xmask)
tl.store(out_ptr1 + (x2), tmp6, xmask)
''', device_str='cuda')
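# Note: this single kernel fuses the whole Standardize forward. tmp2 = x - mu is
# kept as buf0 (saved for backward) and tmp6 = (x - mu) / (std + 1e-06) is the
# output buf1; `x0 = xindex % 4` broadcasts the per-feature parameters over the
# last dimension of the (4, 4, 4, 4) input. Eager-mode sketch (illustrative
# only; hypothetical helper, never called):
def _sketch_standardize_fusion():
    import torch
    x = torch.rand(4, 4, 4, 4)
    mu, std = torch.zeros(4), torch.ones(4)
    centered = x - mu                  # buf0
    out = centered / (std + 1e-06)     # buf1
    assert torch.allclose(out, (x - mu) / (std + 1e-06))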
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, add, x_1], Original ATen: [aten.sub, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_sub_0.run(primals_2, primals_1, primals_3, buf0, buf1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf0, buf1, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch.nn import init
from torch.nn.parameter import Parameter
class Standardize(Module):
"""
Applies (element-wise) standardization with trainable translation parameter μ and scale parameter σ, i.e. computes
(x - μ) / σ where '/' is applied element-wise.
Args:
        in_features: size of each input sample (the output has the same size)
        bias: If set to False, the layer will not learn a translation parameter μ.
            Default: ``True``
        eps: small constant added to σ for numerical stability. Default: ``1e-06``
Attributes:
mu: the learnable translation parameter μ.
std: the learnable scale parameter σ.
"""
__constants__ = ['mu']
def __init__(self, in_features, bias=True, eps=1e-06):
super(Standardize, self).__init__()
self.in_features = in_features
self.out_features = in_features
self.eps = eps
self.std = Parameter(torch.Tensor(in_features))
if bias:
self.mu = Parameter(torch.Tensor(in_features))
else:
self.register_parameter('mu', None)
self.reset_parameters()
def reset_parameters(self):
init.constant_(self.std, 1)
if self.mu is not None:
init.constant_(self.mu, 0)
def forward(self, x):
if self.mu is not None:
            x = x - self.mu  # out-of-place: do not mutate the caller's tensor
x = torch.div(x, self.std + self.eps)
return x
def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.mu is not None)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch.nn import init
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1e-06
tmp5 = tmp3 + tmp4
tmp6 = tmp2 / tmp5
tl.store(out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr1 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_sub_0[grid(256)](primals_2, primals_1,
primals_3, buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0, buf1, primals_3, buf0
class StandardizeNew(Module):
"""
Applies (element-wise) standardization with trainable translation parameter μ and scale parameter σ, i.e. computes
(x - μ) / σ where '/' is applied element-wise.
Args:
        in_features: size of each input sample (the output has the same size)
        bias: If set to False, the layer will not learn a translation parameter μ.
            Default: ``True``
        eps: small constant added to σ for numerical stability. Default: ``1e-06``
Attributes:
mu: the learnable translation parameter μ.
std: the learnable scale parameter σ.
"""
__constants__ = ['mu']
def __init__(self, in_features, bias=True, eps=1e-06):
super(StandardizeNew, self).__init__()
self.in_features = in_features
self.out_features = in_features
self.eps = eps
self.std = Parameter(torch.Tensor(in_features))
if bias:
self.mu = Parameter(torch.Tensor(in_features))
else:
self.register_parameter('mu', None)
self.reset_parameters()
def reset_parameters(self):
init.constant_(self.std, 1)
if self.mu is not None:
init.constant_(self.mu, 0)
def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.mu is not None)
def forward(self, input_0):
primals_1 = self.std
primals_3 = self.mu
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| SDJustus/Deep-SAD-PyTorch | Standardize | false | 1,009 | [
"MIT"
] | 0 | 4d98e6474a7256329134c075894f885a56f59281 | https://github.com/SDJustus/Deep-SAD-PyTorch/tree/4d98e6474a7256329134c075894f885a56f59281 | from torch.nn import Module
import torch
from torch.nn import init
from torch.nn.parameter import Parameter
class Model(Module):
"""
Applies (element-wise) standardization with trainable translation parameter μ and scale parameter σ, i.e. computes
(x - μ) / σ where '/' is applied element-wise.
Args:
        in_features: size of each input sample (the output has the same size)
        bias: If set to False, the layer will not learn a translation parameter μ.
            Default: ``True``
        eps: small constant added to σ for numerical stability. Default: ``1e-06``
Attributes:
mu: the learnable translation parameter μ.
std: the learnable scale parameter σ.
"""
__constants__ = ['mu']
def __init__(self, in_features, bias=True, eps=1e-06):
super().__init__()
self.in_features = in_features
self.out_features = in_features
self.eps = eps
self.std = Parameter(torch.Tensor(in_features))
if bias:
self.mu = Parameter(torch.Tensor(in_features))
else:
self.register_parameter('mu', None)
self.reset_parameters()
def reset_parameters(self):
init.constant_(self.std, 1)
if self.mu is not None:
init.constant_(self.mu, 0)
def forward(self, x):
if self.mu is not None:
            x = x - self.mu  # out-of-place: do not mutate the caller's tensor
x = torch.div(x, self.std + self.eps)
return x
def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.mu is not None)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
NetFCN12 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/mx/cmxbucu53niw4mhkko67o2ijqxfkqksx6yvmymsdhrqavytlfa2x.py
# Topologically Sorted Source Nodes: [x, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# relu => relu
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 246016
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3844) % 16
x0 = xindex % 3844
x4 = (xindex // 3844)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (3872*x4)), tmp4, xmask)
''', device_str='cuda')
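# Note: the output buffer for this kernel uses strides (61952, 3872, 62, 1) for
# a logical (4, 16, 62, 62) tensor: each 62*62 = 3844-element channel plane is
# padded to 3872 (= 121 * 32) elements so every plane starts at an aligned
# offset, which is why the store above uses `x0 + 3872 * x4`. Sketch of such a
# padded allocation (illustrative only; hypothetical helper, never called):
def _sketch_padded_buffer():
    import torch
    buf = torch.empty_strided((4, 16, 62, 62), (61952, 3872, 62, 1))
    assert buf.stride(1) - 62 * 62 == 28   # 28 padding elements per plane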
# kernel path: runs/run_shard_6/inductor_cache/dc/cdcqgi2asu2drj2jlx5ukk5hqfivxebjhqip5p6rp4tfvzioqf5s.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 57600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = (xindex // 30) % 30
x2 = (xindex // 900)
x5 = xindex
x4 = (xindex // 14400)
x6 = xindex % 14400
tmp0 = tl.load(in_ptr0 + ((2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (62 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (63 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (64 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (124 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (125 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (126 + (2*x0) + (124*x1) + (3872*x2)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + (x5), tmp16, xmask)
tl.store(out_ptr1 + (x6 + (14464*x4)), tmp41, xmask)
''', device_str='cuda')
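# Note: the kernel above hand-unrolls NetFCN12's MaxPool2d((3, 3), stride=2):
# nine loads per output element, a running maximum, and an int8 argmax index in
# 0..8 kept for the backward pass. The output side is floor((62 - 3) / 2) + 1 = 30.
# Eager-mode sketch (illustrative only; hypothetical helper, never called):
def _sketch_pool_shape():
    import torch
    import torch.nn.functional as F
    out, idx = F.max_pool2d(torch.randn(4, 16, 62, 62), kernel_size=3,
        stride=2, return_indices=True)
    assert out.shape == (4, 16, 30, 30)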
# kernel path: runs/run_shard_6/inductor_cache/p5/cp5kyzagzpexw4kh6slbjcfecrddlnquqv3egkoyzy7u4qb6h57l.py
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 46656
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 729) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ge/cge6wng5the3ee5zgkaek4p3rauaayrmruhos66wcwk42j5gsxdg.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_3 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 5832
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 729) % 2
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (2, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_7, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 62, 62), (61504, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 16, 62, 62), (61952, 3872, 62, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 246016, grid=grid(246016), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 16, 30, 30), (14400, 900, 30, 1), torch.float32)
buf3 = empty_strided_cuda((4, 16, 30, 30), (14464, 900, 30, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 57600, grid=grid(57600), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 27, 27), (11664, 729, 27, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 46656, grid=grid(46656), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 2, 27, 27), (1458, 729, 27, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf7, primals_7, 5832, grid=grid(5832), stream=stream0)
del primals_7
return (buf7, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 16, 4, 4), (256, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class NetFCN12(nn.Module):
def __init__(self):
super(NetFCN12, self).__init__()
self.conv = nn.Conv2d(3, 16, 3)
self.pool = nn.MaxPool2d((3, 3), stride=2)
self.conv2 = nn.Conv2d(16, 16, 4)
self.conv3 = nn.Conv2d(16, 2, 1)
def forward(self, x):
x = self.conv(x)
x = self.pool(F.relu(x))
x = F.relu(self.conv2(x))
x = self.conv3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
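# Shape bookkeeping behind the constants in the compiled kernels above, for a
# (4, 3, 64, 64) input (illustrative summary):
#   conv (3x3): 64 -> 62, pool (3x3, stride 2): 62 -> 30,
#   conv2 (4x4): 30 -> 27, conv3 (1x1): 27 -> 27,
# so 46656 = 4*16*27*27, 5832 = 4*2*27*27, and the output is (4, 2, 27, 27).
# Quick check (hypothetical helper, never called):
def _sketch_fcn_shapes():
    import torch
    with torch.no_grad():
        assert NetFCN12()(torch.rand(4, 3, 64, 64)).shape == (4, 2, 27, 27)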
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 246016
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 16
x0 = xindex % 3844
x4 = xindex // 3844
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3872 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 57600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x2 = xindex // 900
x5 = xindex
x4 = xindex // 14400
x6 = xindex % 14400
tmp0 = tl.load(in_ptr0 + (2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (62 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (63 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (64 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (124 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (125 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (126 + 2 * x0 + 124 * x1 + 3872 * x2), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x5, tmp16, xmask)
tl.store(out_ptr1 + (x6 + 14464 * x4), tmp41, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 46656
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 729 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 5832
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 729 % 2
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (2, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 62, 62), (61504, 3844, 62, 1))
buf1 = empty_strided_cuda((4, 16, 62, 62), (61952, 3872, 62, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(246016)](buf0, primals_2,
buf1, 246016, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 16, 30, 30), (14400, 900, 30, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 16, 30, 30), (14464, 900, 30, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(57600)](buf1, buf2,
buf3, 57600, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 27, 27), (11664, 729, 27, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(46656)](buf5, primals_5,
46656, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 2, 27, 27), (1458, 729, 27, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_3[grid(5832)](buf7, primals_7, 5832,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return (buf7, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf3, buf5)
class NetFCN12New(nn.Module):
def __init__(self):
super(NetFCN12New, self).__init__()
self.conv = nn.Conv2d(3, 16, 3)
self.pool = nn.MaxPool2d((3, 3), stride=2)
self.conv2 = nn.Conv2d(16, 16, 4)
self.conv3 = nn.Conv2d(16, 2, 1)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| RoyHirsch/DeepLearningCourse | NetFCN12 | false | 1,010 | [
"MIT"
] | 0 | 9036c0fdbb08b610524d7be991f8e4b490a82c6c | https://github.com/RoyHirsch/DeepLearningCourse/tree/9036c0fdbb08b610524d7be991f8e4b490a82c6c | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 16, 3)
self.pool = nn.MaxPool2d((3, 3), stride=2)
self.conv2 = nn.Conv2d(16, 16, 4)
self.conv3 = nn.Conv2d(16, 2, 1)
def forward(self, x):
x = self.conv(x)
x = self.pool(F.relu(x))
x = F.relu(self.conv2(x))
x = self.conv3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
AttentionConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/xy/cxy77nk6tbfgwqyn5amat6tjjfsjhqlutzehwqrmr3xcuh7sc46s.py
# Topologically Sorted Source Nodes: [v_out_1, k_out_2, einsum], Original ATen: [aten.unfold, aten.cat, aten.mul]
# Source node to ATen node mapping:
# einsum => mul_1
# k_out_2 => cat
# v_out_1 => unfold_3
# Graph fragment:
# %unfold_3 : [num_users=2] = call_function[target=torch.ops.aten.unfold.default](args = (%unfold_2, 3, 4, 1), kwargs = {})
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%add, %add_1], 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %permute_1), kwargs = {})
triton_poi_fused_cat_mul_unfold_0 = async_compile.triton('triton_poi_fused_cat_mul_unfold_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_mul_unfold_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_mul_unfold_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x3 = (xindex // 16) % 4
x4 = (xindex // 64)
x5 = xindex % 16
x2 = (xindex // 4) % 4
x1 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp20 = tl.load(in_ptr3 + (x0), xmask)
tmp1 = x3
tmp2 = tl.full([1], 0, tl.int64)
tmp3 = tmp1 >= tmp2
tmp4 = tl.full([1], 2, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr0 + (x5 + (16*x3) + (64*x4)), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x2 + (4*x3)), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp5, tmp8, tmp9)
tmp11 = tmp1 >= tmp4
tmp12 = tl.full([1], 4, tl.int64)
tmp13 = tmp1 < tmp12
tmp14 = tl.load(in_ptr0 + (32 + x5 + (16*((-2) + x3)) + (64*x4)), tmp11 & xmask, other=0.0)
tmp15 = tl.load(in_ptr2 + (x1 + (4*((-2) + x3))), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp11, tmp16, tmp17)
tmp19 = tl.where(tmp5, tmp10, tmp18)
tmp21 = 0.0
tmp22 = tmp19 >= tmp21
tmp23 = 1.0
tmp24 = -1.0
tmp25 = tl.where(tmp22, tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 - tmp26
tmp28 = tmp25 * tmp19
tmp29 = tmp27 * tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp30 / tmp30
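    # In this size specialization the softmax axis has a single element, so
    # x - max(x) is 0 and exp(.)/sum(exp(.)) folds to exp(.)/exp(.) == 1;
    # tmp31 is therefore constant 1 and the attention weights degenerate.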
tmp32 = tmp31 * tmp0
tl.store(in_out_ptr0 + (x0), tmp0, xmask)
tl.store(out_ptr0 + (x0), tmp19, xmask)
tl.store(out_ptr1 + (x0), tmp32, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (2, 1, 1, 4, 1), (4, 4, 4, 1, 1))
assert_size_stride(primals_6, (2, 1, 1, 1, 4), (4, 4, 4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [q_out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [k_out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [v_out], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
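        # buf0/buf1/buf2 are the query/key/value projections (per the node
        # comments above): three 1x1 convolutions of the same input. With
        # padding=0 the padded input equals x, so all three read primals_1.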
buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 4, 4), (64, 16, 16, 4, 4, 1), 0); del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 1, 1, 4, 4), (64, 16, 16, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4, 4, 4, 1), (64, 64, 16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_out_1, k_out_2, einsum], Original ATen: [aten.unfold, aten.cat, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_mul_unfold_0.run(buf3, buf1, primals_5, primals_6, buf0, buf4, buf5, 256, grid=grid(256), stream=stream0)
del buf1
del primals_5
del primals_6
return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, primals_2, primals_3, primals_4, buf0, buf3, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, 1, 1, 4, 1), (4, 4, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, 1, 1, 1, 4), (4, 4, 4, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class AttentionConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1, bias=False):
super(AttentionConv, self).__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.groups = groups
        assert self.out_channels % self.groups == 0, 'out_channels should be divisible by groups. (example: out_channels: 40, groups: 4)'
self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1,
kernel_size, 1), requires_grad=True)
self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1,
kernel_size), requires_grad=True)
self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1,
bias=bias)
self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.reset_parameters()
def forward(self, x):
batch, _channels, height, width = x.size()
padded_x = F.pad(x, [self.padding, self.padding, self.padding, self
.padding])
q_out = self.query_conv(x)
k_out = self.key_conv(padded_x)
v_out = self.value_conv(padded_x)
k_out = k_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
v_out = v_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
k_out_h, k_out_w = k_out.split(self.out_channels // 2, dim=1)
k_out = torch.cat((k_out_h + self.rel_h, k_out_w + self.rel_w), dim=1)
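        # Relative position encoding: rel_h is added to the first half of the
        # key channels and rel_w to the second half before re-concatenation.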
k_out = k_out.contiguous().view(batch, self.groups, self.
out_channels // self.groups, height, width, -1)
v_out = v_out.contiguous().view(batch, self.groups, self.
out_channels // self.groups, height, width, -1)
q_out = q_out.view(batch, self.groups, self.out_channels // self.
groups, height, width, 1)
out = q_out * k_out
out = F.softmax(out, dim=-1)
out = torch.einsum('bnchwk,bnchwk -> bnchw', out, v_out).view(batch,
-1, height, width)
return out
def reset_parameters(self):
init.kaiming_normal_(self.key_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.value_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.query_conv.weight, mode='fan_out',
nonlinearity='relu')
init.normal_(self.rel_h, 0, 1)
init.normal_(self.rel_w, 0, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_mul_unfold_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x3 = xindex // 16 % 4
x4 = xindex // 64
x5 = xindex % 16
x2 = xindex // 4 % 4
x1 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp20 = tl.load(in_ptr3 + x0, xmask)
tmp1 = x3
tl.full([1], 0, tl.int64)
tmp4 = tl.full([1], 2, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr0 + (x5 + 16 * x3 + 64 * x4), tmp5 & xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x2 + 4 * x3), tmp5 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp5, tmp8, tmp9)
tmp11 = tmp1 >= tmp4
tl.full([1], 4, tl.int64)
tmp14 = tl.load(in_ptr0 + (32 + x5 + 16 * (-2 + x3) + 64 * x4), tmp11 &
xmask, other=0.0)
tmp15 = tl.load(in_ptr2 + (x1 + 4 * (-2 + x3)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp11, tmp16, tmp17)
tmp19 = tl.where(tmp5, tmp10, tmp18)
tmp21 = 0.0
tmp22 = tmp19 >= tmp21
tmp23 = 1.0
tmp24 = -1.0
tmp25 = tl.where(tmp22, tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 - tmp26
tmp28 = tmp25 * tmp19
tmp29 = tmp27 * tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp30 / tmp30
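    # As in the original kernel above, the softmax axis is a single element
    # here, so tmp31 = exp(x)/exp(x) is identically 1.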
tmp32 = tmp31 * tmp0
tl.store(in_out_ptr0 + x0, tmp0, xmask)
tl.store(out_ptr0 + x0, tmp19, xmask)
tl.store(out_ptr1 + x0, tmp32, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (2, 1, 1, 4, 1), (4, 4, 4, 1, 1))
assert_size_stride(primals_6, (2, 1, 1, 1, 4), (4, 4, 4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = extern_kernels.convolution(primals_1, primals_3, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1, 4, 4), (64, 16, 16, 4,
4, 1), 0)
del buf2
buf4 = empty_strided_cuda((4, 4, 1, 1, 4, 4), (64, 16, 16, 16, 4, 1
), torch.float32)
buf5 = empty_strided_cuda((4, 1, 4, 4, 4, 1), (64, 64, 16, 4, 1, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_mul_unfold_0[grid(256)](buf3, buf1, primals_5,
primals_6, buf0, buf4, buf5, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del primals_5
del primals_6
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, primals_2, primals_3, primals_4, buf0, buf3, buf4
class AttentionConvNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1, bias=False):
super(AttentionConvNew, self).__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.groups = groups
        assert self.out_channels % self.groups == 0, 'out_channels should be divisible by groups. (example: out_channels: 40, groups: 4)'
self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1,
kernel_size, 1), requires_grad=True)
self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1,
kernel_size), requires_grad=True)
self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1,
bias=bias)
self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.reset_parameters()
def reset_parameters(self):
init.kaiming_normal_(self.key_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.value_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.query_conv.weight, mode='fan_out',
nonlinearity='relu')
init.normal_(self.rel_h, 0, 1)
init.normal_(self.rel_w, 0, 1)
def forward(self, input_0):
primals_5 = self.rel_h
primals_6 = self.rel_w
primals_2 = self.key_conv.weight
primals_3 = self.query_conv.weight
primals_4 = self.value_conv.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| Sam-limyr/End-to-end-ASR-Pytorch | AttentionConv | false | 1,011 | [
"MIT"
] | 0 | 623a50792f48218228549ea17b8ea5e8bb1b342f | https://github.com/Sam-limyr/End-to-end-ASR-Pytorch/tree/623a50792f48218228549ea17b8ea5e8bb1b342f | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1, bias=False):
super().__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.groups = groups
        assert self.out_channels % self.groups == 0, 'out_channels should be divisible by groups. (example: out_channels: 40, groups: 4)'
self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1,
kernel_size, 1), requires_grad=True)
self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1,
kernel_size), requires_grad=True)
self.key_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1,
bias=bias)
self.query_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.value_conv = nn.Conv2d(in_channels, out_channels, kernel_size=
1, bias=bias)
self.reset_parameters()
def forward(self, x):
batch, _channels, height, width = x.size()
padded_x = F.pad(x, [self.padding, self.padding, self.padding, self
.padding])
q_out = self.query_conv(x)
k_out = self.key_conv(padded_x)
v_out = self.value_conv(padded_x)
k_out = k_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
v_out = v_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
k_out_h, k_out_w = k_out.split(self.out_channels // 2, dim=1)
k_out = torch.cat((k_out_h + self.rel_h, k_out_w + self.rel_w), dim=1)
k_out = k_out.contiguous().view(batch, self.groups, self.
out_channels // self.groups, height, width, -1)
v_out = v_out.contiguous().view(batch, self.groups, self.
out_channels // self.groups, height, width, -1)
q_out = q_out.view(batch, self.groups, self.out_channels // self.
groups, height, width, 1)
out = q_out * k_out
out = F.softmax(out, dim=-1)
out = torch.einsum('bnchwk,bnchwk -> bnchw', out, v_out).view(batch,
-1, height, width)
return out
def reset_parameters(self):
init.kaiming_normal_(self.key_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.value_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.query_conv.weight, mode='fan_out',
nonlinearity='relu')
init.normal_(self.rel_h, 0, 1)
init.normal_(self.rel_w, 0, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
ResBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
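    # tmp6 marks where the ReLU clamped to zero; this boolean mask is stored
    # so threshold_backward can reuse it in the backward pass.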
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/mz/cmz3wjq2uutgv7zzhrquuijmcstklp4wvd4q2ptdi3fpwbjqcpo6.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_3 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
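    # tmp2 adds the second linear's bias; tmp4 then adds the residual
    # shortcut (in_ptr1 is the original block input).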
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf4, 256, grid=grid(256), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf3, primals_5, primals_1, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ResBlock(nn.Module):
def __init__(self, size):
super().__init__()
self.size = size
self.layer1 = nn.Linear(self.size, self.size)
self.relu = nn.ReLU()
self.layer2 = nn.Linear(self.size, self.size)
def forward(self, x):
shortcut = x
x = self.layer1(x)
x = self.relu(x)
x = self.layer2(x)
x = x + shortcut
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
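        # buf4 receives the boolean ReLU mask that threshold_backward
        # consumes in the backward pass; it is part of the returned tuple.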
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_1[grid(256)](buf3, primals_5, primals_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf4
class ResBlockNew(nn.Module):
def __init__(self, size):
super().__init__()
self.size = size
self.layer1 = nn.Linear(self.size, self.size)
self.relu = nn.ReLU()
self.layer2 = nn.Linear(self.size, self.size)
def forward(self, input_0):
primals_2 = self.layer1.weight
primals_3 = self.layer1.bias
primals_4 = self.layer2.weight
primals_5 = self.layer2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| RosarioAndolina/psychXRF | ResBlock | false | 1,012 | [
"MIT"
] | 0 | e2adadbd17664d7f74c10304f84b3751c571226e | https://github.com/RosarioAndolina/psychXRF/tree/e2adadbd17664d7f74c10304f84b3751c571226e | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, size):
super().__init__()
self.size = size
self.layer1 = nn.Linear(self.size, self.size)
self.relu = nn.ReLU()
self.layer2 = nn.Linear(self.size, self.size)
def forward(self, x):
shortcut = x
x = self.layer1(x)
x = self.relu(x)
x = self.layer2(x)
x = x + shortcut
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
LN_TD3Critic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [xu], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# xu => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
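    # Builds xu = torch.cat([state, action], dim=1): columns 0-3 are read
    # from in_ptr0 (state), columns 4-7 from in_ptr1 (action).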
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/yv/cyvyo4ngtqx5yqi3aouvj5rpeudtztqrqbcgywvc5tyw4we3zr4n.py
# Topologically Sorted Source Nodes: [x1, x1_1], Original ATen: [aten.relu, aten.native_layer_norm]
# Source node to ATen node mapping:
# x1 => relu
# x1_1 => add, rsqrt, var_mean
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%addmm,), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%relu, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_relu_1 = async_compile.triton('triton_poi_fused_native_layer_norm_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
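    # The hidden width is specialized to 4, so the LayerNorm reduction is
    # fully unrolled: ReLU each element, then accumulate the row mean and
    # rsqrt(var + eps) for the fused relu + layer_norm.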
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + (x0), tmp13, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/n3/cn3ucymmjowa2gnsrhjc57k3cvwek7mc56s6tp6mjfw5s53i4pqk.py
# Topologically Sorted Source Nodes: [x1, x1_1], Original ATen: [aten.relu, aten.native_layer_norm]
# Source node to ATen node mapping:
# x1 => relu
# x1_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%addmm,), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%relu, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_5), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_6), kwargs = {})
triton_poi_fused_native_layer_norm_relu_2 = async_compile.triton('triton_poi_fused_native_layer_norm_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
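    # Second half of the fused relu + layer_norm: normalize relu(x) with the
    # precomputed mean (in_ptr1) and rsqrt (in_ptr2), then apply the affine
    # weight (in_ptr3) and bias (in_ptr4).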
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (1, 4), (4, 1))
assert_size_stride(primals_12, (1, ), (1, ))
assert_size_stride(primals_13, (4, 8), (8, 1))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4, ), (1, ))
assert_size_stride(primals_19, (4, ), (1, ))
assert_size_stride(primals_20, (4, ), (1, ))
assert_size_stride(primals_21, (1, 4), (4, 1))
assert_size_stride(primals_22, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [xu], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
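        # buf0 holds xu = cat([state, action], dim=1); both critic heads
        # (l1/l2/l3 and l4/l5/l6) read from this single buffer.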
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [x1, x1_1], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_1.run(buf1, buf2, buf3, 4, grid=grid(4), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x1, x1_1], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_2.run(buf1, buf2, buf3, primals_5, primals_6, buf4, 16, grid=grid(16), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = buf3; del buf3 # reuse
buf7 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x1_2, x1_3], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_1.run(buf5, buf6, buf7, 4, grid=grid(4), stream=stream0)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x1_2, x1_3], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_2.run(buf5, buf6, buf7, primals_9, primals_10, buf8, 16, grid=grid(16), stream=stream0)
del primals_10
buf10 = reinterpret_tensor(buf7, (4, 1), (1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [x1_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, buf8, reinterpret_tensor(primals_11, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_12
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_14, buf0, reinterpret_tensor(primals_13, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf11)
del primals_13
del primals_14
buf12 = buf6; del buf6 # reuse
buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [x2, x2_1], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_1.run(buf11, buf12, buf13, 4, grid=grid(4), stream=stream0)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x2, x2_1], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_2.run(buf11, buf12, buf13, primals_15, primals_16, buf14, 16, grid=grid(16), stream=stream0)
del primals_16
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_18, buf14, reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15)
del primals_18
buf16 = buf13; del buf13 # reuse
buf17 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [x2_2, x2_3], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_1.run(buf15, buf16, buf17, 4, grid=grid(4), stream=stream0)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x2_2, x2_3], Original ATen: [aten.relu, aten.native_layer_norm]
triton_poi_fused_native_layer_norm_relu_2.run(buf15, buf16, buf17, primals_19, primals_20, buf18, 16, grid=grid(16), stream=stream0)
del buf16
del primals_20
buf20 = reinterpret_tensor(buf17, (4, 1), (1, 1), 0); del buf17 # reuse
# Topologically Sorted Source Nodes: [x2_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_22, buf18, reinterpret_tensor(primals_21, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf20)
del primals_22
return (buf10, buf20, primals_5, primals_9, primals_15, primals_19, buf0, buf1, buf4, buf5, buf8, buf11, buf14, buf15, buf18, primals_21, primals_17, primals_11, primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LN_TD3Critic(nn.Module):
def __init__(self, state_dim, action_dim, hidden_size1, hidden_size2):
super(LN_TD3Critic, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, hidden_size1)
self.ln1 = nn.LayerNorm(hidden_size1)
self.l2 = nn.Linear(hidden_size1, hidden_size2)
self.ln2 = nn.LayerNorm(hidden_size2)
self.l3 = nn.Linear(hidden_size2, 1)
self.l4 = nn.Linear(state_dim + action_dim, hidden_size1)
self.ln4 = nn.LayerNorm(hidden_size1)
self.l5 = nn.Linear(hidden_size1, hidden_size2)
self.ln5 = nn.LayerNorm(hidden_size2)
self.l6 = nn.Linear(hidden_size2, 1)
def forward(self, inputs, actions):
xu = torch.cat([inputs, actions], 1)
x1 = F.relu(self.l1(xu))
x1 = self.ln1(x1)
x1 = F.relu(self.l2(x1))
x1 = self.ln2(x1)
x1 = self.l3(x1)
x2 = F.relu(self.l4(xu))
x2 = self.ln4(x2)
x2 = F.relu(self.l5(x2))
x2 = self.ln5(x2)
x2 = self.l6(x2)
return x1, x2
def Q1(self, inputs, actions):
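        # Evaluates only the first critic head; in TD3 this Q1 value is what
        # the deterministic actor update maximizes.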
xu = torch.cat([inputs, actions], 1)
x1 = F.relu(self.l1(xu))
x1 = self.ln1(x1)
x1 = F.relu(self.l2(x1))
x1 = self.ln2(x1)
x1 = self.l3(x1)
return x1
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'hidden_size1': 4,
'hidden_size2': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
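    # Same concatenation kernel as in the first listing: state columns come
    # from in_ptr0, action columns from in_ptr1.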
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_relu_2(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (1, 4), (4, 1))
assert_size_stride(primals_12, (1,), (1,))
assert_size_stride(primals_13, (4, 8), (8, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4,), (1,))
assert_size_stride(primals_20, (4,), (1,))
assert_size_stride(primals_21, (1, 4), (4, 1))
assert_size_stride(primals_22, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3,
(8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_native_layer_norm_relu_1[grid(4)](buf1, buf2, buf3,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_relu_2[grid(16)](buf1, buf2,
buf3, primals_5, primals_6, buf4, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = buf3
del buf3
buf7 = buf2
del buf2
triton_poi_fused_native_layer_norm_relu_1[grid(4)](buf5, buf6, buf7,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_relu_2[grid(16)](buf5, buf6,
buf7, primals_9, primals_10, buf8, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_10
buf10 = reinterpret_tensor(buf7, (4, 1), (1, 1), 0)
del buf7
extern_kernels.addmm(primals_12, buf8, reinterpret_tensor(
primals_11, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_12
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_14, buf0, reinterpret_tensor(
primals_13, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf11)
del primals_13
del primals_14
buf12 = buf6
del buf6
buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_native_layer_norm_relu_1[grid(4)](buf11, buf12,
buf13, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_relu_2[grid(16)](buf11, buf12,
buf13, primals_15, primals_16, buf14, 16, XBLOCK=16, num_warps=
1, num_stages=1)
del primals_16
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_18, buf14, reinterpret_tensor(
primals_17, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15)
del primals_18
buf16 = buf13
del buf13
buf17 = buf12
del buf12
triton_poi_fused_native_layer_norm_relu_1[grid(4)](buf15, buf16,
buf17, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_relu_2[grid(16)](buf15, buf16,
buf17, primals_19, primals_20, buf18, 16, XBLOCK=16, num_warps=
1, num_stages=1)
del buf16
del primals_20
buf20 = reinterpret_tensor(buf17, (4, 1), (1, 1), 0)
del buf17
extern_kernels.addmm(primals_22, buf18, reinterpret_tensor(
primals_21, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf20)
del primals_22
return (buf10, buf20, primals_5, primals_9, primals_15, primals_19,
buf0, buf1, buf4, buf5, buf8, buf11, buf14, buf15, buf18,
primals_21, primals_17, primals_11, primals_7)
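# Note (editor addition): call() returns the two critic heads first (buf10 is
# the Q1 output, buf20 is Q2); the trailing tensors are activations and
# weights saved for the backward pass.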
class LN_TD3CriticNew(nn.Module):
def __init__(self, state_dim, action_dim, hidden_size1, hidden_size2):
super(LN_TD3CriticNew, self).__init__()
self.l1 = nn.Linear(state_dim + action_dim, hidden_size1)
self.ln1 = nn.LayerNorm(hidden_size1)
self.l2 = nn.Linear(hidden_size1, hidden_size2)
self.ln2 = nn.LayerNorm(hidden_size2)
self.l3 = nn.Linear(hidden_size2, 1)
self.l4 = nn.Linear(state_dim + action_dim, hidden_size1)
self.ln4 = nn.LayerNorm(hidden_size1)
self.l5 = nn.Linear(hidden_size1, hidden_size2)
self.ln5 = nn.LayerNorm(hidden_size2)
self.l6 = nn.Linear(hidden_size2, 1)
def Q1(self, inputs, actions):
xu = torch.cat([inputs, actions], 1)
x1 = F.relu(self.l1(xu))
x1 = self.ln1(x1)
x1 = F.relu(self.l2(x1))
x1 = self.ln2(x1)
x1 = self.l3(x1)
return x1
def forward(self, input_0, input_1):
primals_3 = self.l1.weight
primals_4 = self.l1.bias
primals_5 = self.ln1.weight
primals_6 = self.ln1.bias
        primals_7 = self.l2.weight
primals_8 = self.l2.bias
primals_9 = self.ln2.weight
primals_10 = self.ln2.bias
primals_11 = self.l3.weight
primals_12 = self.l3.bias
primals_13 = self.l4.weight
primals_14 = self.l4.bias
primals_15 = self.ln4.weight
primals_16 = self.ln4.bias
        primals_17 = self.l5.weight
primals_18 = self.l5.bias
primals_19 = self.ln5.weight
primals_20 = self.ln5.bias
primals_21 = self.l6.weight
primals_22 = self.l6.bias
        primals_1 = input_0
        primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22])
return output[0], output[1]
| RohanPankaj/apex | LN_TD3Critic | false | 1,013 | [
"MIT"
] | 0 | 74e96386bf9446d1179106d6d65ea0368c1b5b27 | https://github.com/RohanPankaj/apex/tree/74e96386bf9446d1179106d6d65ea0368c1b5b27 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, state_dim, action_dim, hidden_size1, hidden_size2):
super().__init__()
self.l1 = nn.Linear(state_dim + action_dim, hidden_size1)
self.ln1 = nn.LayerNorm(hidden_size1)
self.l2 = nn.Linear(hidden_size1, hidden_size2)
self.ln2 = nn.LayerNorm(hidden_size2)
self.l3 = nn.Linear(hidden_size2, 1)
self.l4 = nn.Linear(state_dim + action_dim, hidden_size1)
self.ln4 = nn.LayerNorm(hidden_size1)
self.l5 = nn.Linear(hidden_size1, hidden_size2)
self.ln5 = nn.LayerNorm(hidden_size2)
self.l6 = nn.Linear(hidden_size2, 1)
def forward(self, inputs, actions):
xu = torch.cat([inputs, actions], 1)
x1 = F.relu(self.l1(xu))
x1 = self.ln1(x1)
x1 = F.relu(self.l2(x1))
x1 = self.ln2(x1)
x1 = self.l3(x1)
x2 = F.relu(self.l4(xu))
x2 = self.ln4(x2)
x2 = F.relu(self.l5(x2))
x2 = self.ln5(x2)
x2 = self.l6(x2)
return x1, x2
def Q1(self, inputs, actions):
xu = torch.cat([inputs, actions], 1)
x1 = F.relu(self.l1(xu))
x1 = self.ln1(x1)
x1 = F.relu(self.l2(x1))
x1 = self.ln2(x1)
x1 = self.l3(x1)
return x1
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'action_dim': 4, 'hidden_size1': 4,
'hidden_size2': 4}]
|
Transformer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch as t
import torch.nn as nn
from torch.distributions.categorical import Categorical
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
class Transformer(nn.Module):
def __init__(self, input_size, num_actions, hidden_size, learning_rate=
0.0003):
super(Transformer, self).__init__()
self.num_actions = num_actions
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, num_actions)
self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.softmax(self.linear2(x), dim=1)
return x
def get_probs(self, state):
return self.forward(state)
def get_action(self, state):
state = state.unsqueeze(0)
probs = self.forward(Variable(state))
        sampled_action = Categorical(probs.detach()).sample()
        log_prob = t.log(probs.squeeze(0)[sampled_action])
return sampled_action, log_prob
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'num_actions': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch as t
import torch.nn as nn
from torch.distributions.categorical import Categorical
from torch.autograd import Variable
import torch.optim as optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
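# --- Editor's illustrative sketch (not generated code): the kernel above fuses
# the linear bias add with ReLU and also records the `<= 0` mask that ReLU's
# backward pass reuses; a minimal eager reference under those assumptions:
def _relu_with_backward_mask(x, bias):
    import torch
    act = torch.relu(x + bias)
    return act, act <= 0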
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
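# --- Editor's illustrative sketch (not generated code): the two kernels above
# implement the numerically stable two-pass softmax over dim=1 of a
# (4, 4, 4, 4) tensor: pass 1 subtracts the row max and exponentiates, pass 2
# normalizes by the sum of exponentials. Eager reference:
def _softmax_reference(x):
    m = x.amax(dim=1, keepdim=True)
    e = (x - m).exp()
    return e / e.sum(dim=1, keepdim=True)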
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4, primals_4, buf5
class TransformerNew(nn.Module):
def __init__(self, input_size, num_actions, hidden_size, learning_rate=
0.0003):
super(TransformerNew, self).__init__()
self.num_actions = num_actions
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, num_actions)
self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
def get_probs(self, state):
return self.forward(state)
def get_action(self, state):
state = state.unsqueeze(0)
probs = self.forward(Variable(state))
        sampled_action = Categorical(probs.detach()).sample()
        log_prob = t.log(probs.squeeze(0)[sampled_action])
return sampled_action, log_prob
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| LucWeber/2048-RLenv | Transformer | false | 1,014 | [
"MIT"
] | 0 | 6beff54691f0436f0fbca6bdbb9430fd37eab37d | https://github.com/LucWeber/2048-RLenv/tree/6beff54691f0436f0fbca6bdbb9430fd37eab37d | import torch
import torch as t
import torch.nn as nn
from torch.distributions.categorical import Categorical
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
class Model(nn.Module):
def __init__(self, input_size, num_actions, hidden_size, learning_rate=
0.0003):
super().__init__()
self.num_actions = num_actions
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, num_actions)
self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.softmax(self.linear2(x), dim=1)
return x
def get_probs(self, state):
return self.forward(state)
def get_action(self, state):
state = state.unsqueeze(0)
probs = self.forward(Variable(state))
        sampled_action = Categorical(probs.detach()).sample()
        log_prob = t.log(probs.squeeze(0)[sampled_action])
return sampled_action, log_prob
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
TwoLayerNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ai/caitnpldotnv4k4oj67wyecd2ig4qcjrnmr35rmx6o2vxx245xs3.py
# Topologically Sorted Source Nodes: [h_relu], Original ATen: [aten.clamp, aten.ge]
# Source node to ATen node mapping:
# h_relu => clamp_min
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_1, 0), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_1, 0), kwargs = {})
triton_poi_fused_clamp_ge_0 = async_compile.triton('triton_poi_fused_clamp_ge_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp2 >= tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_relu], Original ATen: [aten.clamp, aten.ge]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_ge_0.run(buf0, primals_2, buf1, buf3, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class TwoLayerNet(torch.nn.Module):
def __init__(self, D_in, H, D_out):
super(TwoLayerNet, self).__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(H, D_out)
def forward(self, x):
h_relu = self.linear1(x).clamp(min=0)
y_pred = self.linear2(h_relu)
return y_pred
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'D_in': 4, 'H': 4, 'D_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp2 >= tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp5, xmask)
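# --- Editor's illustrative sketch (not generated code): the kernel above fuses
# the bias add with clamp(min=0) and emits the `>= 0` mask of the
# pre-activation that the clamp backward reuses:
def _clamp_with_backward_mask(x, bias):
    pre = x + bias
    return pre.clamp(min=0), pre >= 0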
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_clamp_ge_0[grid(256)](buf0, primals_2, buf1, buf3,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = buf0
del buf0
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3
class TwoLayerNetNew(torch.nn.Module):
def __init__(self, D_in, H, D_out):
super(TwoLayerNetNew, self).__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(H, D_out)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Saran-nns/delve | TwoLayerNet | false | 1,015 | [
"MIT"
] | 0 | 3489d8aa13181b392d3c47a19f9d9a47d87f8790 | https://github.com/Saran-nns/delve/tree/3489d8aa13181b392d3c47a19f9d9a47d87f8790 | import torch
class Model(torch.nn.Module):
def __init__(self, D_in, H, D_out):
super().__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(H, D_out)
def forward(self, x):
h_relu = self.linear1(x).clamp(min=0)
y_pred = self.linear2(h_relu)
return y_pred
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
PixelwiseNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ot/cotu2ivn5oifbezvztnybj4hxdzlduw4unots44b65tvh2c2a6wn.py
# Topologically Sorted Source Nodes: [pow_1, mean, add, y, y_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# mean => mean
# pow_1 => pow_1
# y => sqrt
# y_1 => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2.0), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-08), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %sqrt), kwargs = {})
triton_poi_fused_add_div_mean_pow_sqrt_0 = async_compile.triton('triton_poi_fused_add_div_mean_pow_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_pow_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_pow_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp0 / tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, mean, add, y, y_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_pow_sqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class PixelwiseNorm(torch.nn.Module):
"""
------------------------------------------------------------------------------------
Pixelwise feature vector normalization.
reference:
https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py#L120
------------------------------------------------------------------------------------
"""
def __init__(self):
super(PixelwiseNorm, self).__init__()
@staticmethod
def forward(x, alpha=1e-08):
y = x.pow(2.0).mean(dim=1, keepdim=True).add(alpha).sqrt()
y = x / y
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_pow_sqrt_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp0 / tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
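# --- Editor's illustrative sketch (not generated code): the single fused
# kernel above evaluates y = x / sqrt(mean(x**2, dim=1) + 1e-08) per spatial
# location, i.e. exactly the eager PixelwiseNorm.forward:
def _pixelwise_norm_reference(x, alpha=1e-08):
    return x / x.pow(2.0).mean(dim=1, keepdim=True).add(alpha).sqrt()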
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_pow_sqrt_0[grid(256)](arg0_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class PixelwiseNormNew(torch.nn.Module):
"""
------------------------------------------------------------------------------------
Pixelwise feature vector normalization.
reference:
https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py#L120
------------------------------------------------------------------------------------
"""
def __init__(self):
super(PixelwiseNormNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| SashaMatsun/torch-GAN | PixelwiseNorm | false | 1,016 | [
"MIT"
] | 0 | 534a634530548d3f8b3a102c3e43e1cc64d8506d | https://github.com/SashaMatsun/torch-GAN/tree/534a634530548d3f8b3a102c3e43e1cc64d8506d | import torch
class Model(torch.nn.Module):
"""
------------------------------------------------------------------------------------
Pixelwise feature vector normalization.
reference:
https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py#L120
------------------------------------------------------------------------------------
"""
def __init__(self):
super().__init__()
@staticmethod
def forward(x, alpha=1e-08):
y = x.pow(2.0).mean(dim=1, keepdim=True).add(alpha).sqrt()
y = x / y
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CAMMNISTExtendedClassifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/2x/c2xnlfml7v6uboqk242zwfdxrawephuy6yfg2pq7i4yip73axefe.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ne/cnepmjd66uu3laeexeusfxab3aayptiri2wp2knrgtgmx52tvzxj.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
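# --- Editor's illustrative sketch (not generated code): the two layout
# kernels above repack 3x3 conv weights from contiguous (O, I, 3, 3) storage
# so the input channel becomes the innermost (stride-1) dim. Reading the index
# math, this appears equivalent to a channels-last conversion; treat the exact
# permutation as an inference, not a guarantee:
def _repack_weight_reference(w):
    import torch
    return w.contiguous(memory_format=torch.channels_last)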
# kernel path: runs/run_shard_6/inductor_cache/el/celpz7mimnmbibz7qq7jmbeys46qobeu3cs2pwbodz3v5kaii5iu.py
# Topologically Sorted Source Nodes: [conv2d, relu, pad], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
# Source node to ATen node mapping:
# conv2d => convolution
# pad => constant_pad_nd
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%relu, [2, 2, 2, 2], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_convolution_relu_2 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 8192], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 4225
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = (xindex // 65)
x2 = xindex % 65
y4 = yindex
y0 = yindex % 32
x5 = xindex
y1 = (yindex // 32)
tmp0 = (-2) + x3
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 61, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-2) + x2
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-124) + x2 + (61*x3) + (3721*y4)), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tl.load(in_ptr1 + (tl.broadcast_to(y0, [XBLOCK, YBLOCK])), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1, 1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + (y0 + (32*x5) + (135200*y1)), tmp17, xmask & ymask)
''', device_str='cuda')
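# --- Editor's illustrative sketch (not generated code): this kernel fuses the
# conv bias add, ReLU, and a 2-pixel zero pad on every side, writing the
# result in channels-last layout; eager reference under those assumptions:
def _bias_relu_pad_reference(conv_out, bias):
    import torch.nn.functional as F
    return F.pad(F.relu(conv_out + bias[None, :, None, None]), (2, 2, 2, 2))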
# kernel path: runs/run_shard_6/inductor_cache/l4/cl4ovepq3umdhc5dq5lkereeda2b63wapoag7js33bvo34o6r5qk.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32) % 32
x2 = (xindex // 1024) % 32
x3 = (xindex // 32768)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (4160*x2) + (135200*x3)), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (4160*x2) + (135200*x3)), None)
tmp3 = tl.load(in_ptr0 + (2080 + x0 + (64*x1) + (4160*x2) + (135200*x3)), None)
tmp5 = tl.load(in_ptr0 + (2112 + x0 + (64*x1) + (4160*x2) + (135200*x3)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4), tmp6, None)
tl.store(out_ptr1 + (x4), tmp16, None)
''', device_str='cuda')
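# NOTE: a 2x2, stride-2 max pool with argmax indices over the padded 65x65
# channels-last activation produced above. One spatial row is 65*32 = 2080
# elements, so the four loads at offsets 0, 32, 2080 and 2112 cover the 2x2
# window; out_ptr1 records the winning position as an int8 in 0..3,
# presumably consumed by max_pool2d's backward.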
# kernel path: runs/run_shard_6/inductor_cache/lm/clm7iihcscfe7z4kfaeauxfl3cwsallxff6gtjtoatb4dm52gbly.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1, pad_1], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# pad_1 => constant_pad_nd_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%relu_1, [2, 2, 2, 2], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_convolution_relu_4 = async_compile.triton('triton_poi_fused_constant_pad_nd_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_convolution_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 2176) % 34
x1 = (xindex // 64) % 34
x3 = (xindex // 73984)
x4 = xindex % 2176
x0 = xindex % 64
x6 = xindex
tmp0 = (-2) + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 30, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-2) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-3968) + x4 + (1920*x2) + (57600*x3)), tmp10 & xmask, other=0.0)
tmp12 = tl.load(in_ptr1 + (x0), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + (x6), tmp17, xmask)
''', device_str='cuda')
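# NOTE: epilogue fusion of bias add + ReLU + 2-pixel zero padding after the
# second conv. One input row of the 30x30x64 channels-last plane is 30*64 =
# 1920 elements, so the base offset -3968 = -2*1920 - 2*64 folds the (-2, -2)
# padding origin into the load. Rough eager equivalent (hedged):
#   pad(relu(conv2(x)), [2, 2, 2, 2])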
# kernel path: runs/run_shard_6/inductor_cache/na/cnasg2q3johltgmojrkg4nbec322gl46i77upzgowwjyoudujnto.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 73984
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 17
x2 = (xindex // 1088)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (4352*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (4352*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (2176 + x0 + (128*x1) + (4352*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (2240 + x0 + (128*x1) + (4352*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/3b/c3bdx5zauqq2m7ty5foifmdiznvmanqvnu6inufyeukwhezhvdpt.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.mean]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# x_3 => mean
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_2, [-1, -2], True), kwargs = {})
triton_red_fused_convolution_mean_relu_6 = async_compile.triton('triton_red_fused_convolution_mean_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1024, 128],
reduction_hint=ReductionHint.OUTER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_convolution_mean_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_convolution_mean_relu_6(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1024
rnumel = 113
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x1 = (xindex // 128) % 2
x0 = xindex % 128
x2 = (xindex // 256)
_tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = r3 + (113*x1)
tmp1 = tl.full([1, 1], 225, tl.int32)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + (128*((r3 + (113*x1)) % 225)) + (28800*x2)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp4 = tl.load(in_ptr1 + (tl.broadcast_to(x0, [XBLOCK, RBLOCK])), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp5 = tmp3 + tmp4
tmp6 = tl.full([1, 1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tl.full(tmp7.shape, 0, tmp7.dtype)
tmp9 = tl.where(tmp2, tmp7, tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = _tmp11 + tmp10
_tmp11 = tl.where(rmask & xmask, tmp12, _tmp11)
tmp11 = tl.sum(_tmp11, 1)[:, None]
tl.store(out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')
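# NOTE: first stage of a split mean reduction implementing
# adaptive_avg_pool2d((1, 1)). The 15*15 = 225 spatial positions per channel
# are covered by two chunks of up to 113 elements (x1 in {0, 1}); tmp2 masks
# the out-of-range tail of the second chunk. Bias add and ReLU are fused into
# the load so the full relu_2 tensor is never materialized here.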
# kernel path: runs/run_shard_6/inductor_cache/do/cdowndifnly7ygophzfe3dcwfiqfqgwqhoee4zyjsibsrr7gz7oo.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.mean]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# x_3 => mean
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_2, [-1, -2], True), kwargs = {})
triton_per_fused_convolution_mean_relu_7 = async_compile.triton('triton_per_fused_convolution_mean_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 2],
reduction_hint=ReductionHint.OUTER_TINY,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_convolution_mean_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_convolution_mean_relu_7(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 512
rnumel = 2
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 128
x1 = (xindex // 128)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*r2) + (256*x1)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 225.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
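# NOTE: second stage of the mean reduction: sums the two partial sums per
# (batch, channel) from the kernel above and divides by 225.0 (= 15*15),
# finishing the global average pool in-place in the output buffer.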
# kernel path: runs/run_shard_6/inductor_cache/ge/cgej6ibs6rnbnwc2dfuwt7ui7inczmtabonaer7zvibwgyvndhxf.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
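# NOTE: this kernel and the two threshold_backward kernels below recompute
# conv + bias + ReLU only to store the boolean mask (relu_out <= 0) that
# autograd needs for ReLU's backward, rather than keeping the activation
# itself alive.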
# kernel path: runs/run_shard_6/inductor_cache/p7/cp7pjxasd7oc3lyx26szzdnaxvel6mpt2pgrovpnphqimwfdqkvq.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/sk/cskx43u5gpetob3mawqgd2whpgf4mhgwxwkcdlrrywlscrvheqpm.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_10 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 3721
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (3721*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (y0 + (32*x2) + (119072*y1)), tmp6, xmask & ymask)
''', device_str='cuda')
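# NOTE: same ReLU-mask computation as the two kernels above, but this one
# also converts conv1's NCHW output into a channels-last bool mask (strides
# (119072, 1, 1952, 32), with 1952 = 61*32), matching the layout used by the
# rest of the graph.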
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (10, 128), (128, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_4, buf0, 2048, 9, grid=grid(2048, 9), stream=stream0)
del primals_4
buf1 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_6, buf1, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 61, 61), (119072, 3721, 61, 1))
buf3 = empty_strided_cuda((4, 32, 65, 65), (135200, 1, 2080, 32), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, relu, pad], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_convolution_relu_2.run(buf2, primals_2, buf3, 128, 4225, grid=grid(128, 4225), stream=stream0)
buf4 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.float32)
buf5 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf3, buf4, buf5, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 30, 30), (57600, 1, 1920, 64))
buf7 = empty_strided_cuda((4, 64, 34, 34), (73984, 1, 2176, 64), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, relu_1, pad_1], Original ATen: [aten.convolution, aten.relu, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_convolution_relu_4.run(buf6, primals_5, buf7, 295936, grid=grid(295936), stream=stream0)
buf8 = empty_strided_cuda((4, 64, 17, 17), (18496, 1, 1088, 64), torch.float32)
buf9 = empty_strided_cuda((4, 64, 17, 17), (18496, 1, 1088, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf7, buf8, buf9, 73984, grid=grid(73984), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf8, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 15, 15), (28800, 1, 1920, 128))
buf11 = empty_strided_cuda((4, 128, 1, 1, 2), (256, 1, 1024, 1024, 128), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.mean]
triton_red_fused_convolution_mean_relu_6.run(buf10, primals_7, buf11, 1024, 113, grid=grid(1024), stream=stream0)
buf12 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 512, 512), torch.float32)
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_2, x_3], Original ATen: [aten.convolution, aten.relu, aten.mean]
triton_per_fused_convolution_mean_relu_7.run(buf13, buf11, 512, 2, grid=grid(512), stream=stream0)
del buf11
buf14 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf13, (4, 128), (128, 1), 0), reinterpret_tensor(primals_8, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf14)
del primals_9
buf15 = empty_strided_cuda((4, 128, 15, 15), (28800, 1, 1920, 128), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_8.run(buf10, primals_7, buf15, 115200, grid=grid(115200), stream=stream0)
del buf10
del primals_7
buf16 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_9.run(buf6, primals_5, buf16, 230400, grid=grid(230400), stream=stream0)
del buf6
del primals_5
buf17 = empty_strided_cuda((4, 32, 61, 61), (119072, 1, 1952, 32), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_10.run(buf2, primals_2, buf17, 128, 3721, grid=grid(128, 3721), stream=stream0)
del buf2
del primals_2
return (buf14, primals_1, primals_3, buf0, buf1, buf3, buf4, buf5, buf7, buf8, buf9, reinterpret_tensor(buf13, (4, 128), (128, 1), 0), primals_8, buf15, buf16, buf17, )
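# NOTE: call() returns the logits (buf14) followed by every tensor the
# backward pass will need: weights and input, the layout-converted weights,
# pooling outputs and indices, the flattened pooled features, and the three
# ReLU masks computed above.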
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn as nn
from torch import optim as optim
from torchvision import transforms as transforms
from torch.nn.functional import pad
class CAMMNISTExtendedClassifier(nn.Module):
def __init__(self):
super(CAMMNISTExtendedClassifier, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=4)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
self.relu3 = nn.ReLU()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(128, 10)
def forward(self, x):
x = self.pool1(pad(self.relu1(self.conv1(x)), [2, 2, 2, 2]))
x = self.pool2(pad(self.relu2(self.conv2(x)), [2, 2, 2, 2]))
x = self.relu3(self.conv3(x))
x = self.avgpool(x)
x = x.view(x.shape[0], -1)
return self.fc(x)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
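# NOTE (hedged mapping): the eager model above corresponds to the compiled
# graph roughly as conv1/relu1/pad/pool1 -> kernels 2-3, conv2/relu2/pad/pool2
# -> kernels 4-5, conv3/relu3/avgpool -> kernels 6-7, and fc -> the addmm;
# kernels 8-10 only produce ReLU masks for backward.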
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn as nn
from torch import optim as optim
from torchvision import transforms as transforms
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
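# NOTE: the two kernels above permute the conv2/conv3 weights from contiguous
# (O, I, kh, kw) into a channels-last layout (input channel innermost, stride
# 1), so the extern convolution calls below run on channels-last operands.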
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_2(in_ptr0, in_ptr1,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 4225
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex // 65
x2 = xindex % 65
y4 = yindex
y0 = yindex % 32
x5 = xindex
y1 = yindex // 32
tmp0 = -2 + x3
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 61, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -2 + x2
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-124 + x2 + 61 * x3 + 3721 * y4), tmp10 &
xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp12 = tl.load(in_ptr1 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp10 &
xmask & ymask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1, 1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + (y0 + 32 * x5 + 135200 * y1), tmp17, xmask & ymask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32 % 32
x2 = xindex // 1024 % 32
x3 = xindex // 32768
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 4160 * x2 + 135200 * x3), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 4160 * x2 + 135200 * x3),
None)
tmp3 = tl.load(in_ptr0 + (2080 + x0 + 64 * x1 + 4160 * x2 + 135200 * x3
), None)
tmp5 = tl.load(in_ptr0 + (2112 + x0 + 64 * x1 + 4160 * x2 + 135200 * x3
), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x4, tmp6, None)
tl.store(out_ptr1 + x4, tmp16, None)
@triton.jit
def triton_poi_fused_constant_pad_nd_convolution_relu_4(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 2176 % 34
x1 = xindex // 64 % 34
x3 = xindex // 73984
x4 = xindex % 2176
x0 = xindex % 64
x6 = xindex
tmp0 = -2 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 30, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -2 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-3968 + x4 + 1920 * x2 + 57600 * x3), tmp10 &
xmask, other=0.0)
tmp12 = tl.load(in_ptr1 + x0, tmp10 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tl.store(out_ptr0 + x6, tmp17, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 73984
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 17
x2 = xindex // 1088
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 4352 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 4352 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (2176 + x0 + 128 * x1 + 4352 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (2240 + x0 + 128 * x1 + 4352 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_red_fused_convolution_mean_relu_6(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 113
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x1 = xindex // 128 % 2
x0 = xindex % 128
x2 = xindex // 256
_tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = r3 + 113 * x1
tmp1 = tl.full([1, 1], 225, tl.int32)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + 128 * ((r3 + 113 * x1) % 225) +
28800 * x2), rmask & tmp2 & xmask, eviction_policy='evict_last',
other=0.0)
tmp4 = tl.load(in_ptr1 + tl.broadcast_to(x0, [XBLOCK, RBLOCK]),
rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0)
tmp5 = tmp3 + tmp4
tmp6 = tl.full([1, 1], 0, tl.int32)
tmp7 = triton_helpers.maximum(tmp6, tmp5)
tmp8 = tl.full(tmp7.shape, 0, tmp7.dtype)
tmp9 = tl.where(tmp2, tmp7, tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = _tmp11 + tmp10
_tmp11 = tl.where(rmask & xmask, tmp12, _tmp11)
tmp11 = tl.sum(_tmp11, 1)[:, None]
tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_per_fused_convolution_mean_relu_7(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 512
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 128
x1 = xindex // 128
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * r2 + 256 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 225.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0,
in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 128
xnumel = 3721
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 3721 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (y0 + 32 * x2 + 119072 * y1), tmp6, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (10, 128), (128, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 9)](primals_4, buf0, 2048, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_1[grid(8192, 9)](primals_6, buf1, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf2 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 61, 61), (119072, 3721, 61, 1))
buf3 = empty_strided_cuda((4, 32, 65, 65), (135200, 1, 2080, 32),
torch.float32)
triton_poi_fused_constant_pad_nd_convolution_relu_2[grid(128, 4225)](
buf2, primals_2, buf3, 128, 4225, XBLOCK=32, YBLOCK=32,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32),
torch.float32)
buf5 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf3, buf4,
buf5, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 30, 30), (57600, 1, 1920, 64))
buf7 = empty_strided_cuda((4, 64, 34, 34), (73984, 1, 2176, 64),
torch.float32)
triton_poi_fused_constant_pad_nd_convolution_relu_4[grid(295936)](buf6,
primals_5, buf7, 295936, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = empty_strided_cuda((4, 64, 17, 17), (18496, 1, 1088, 64),
torch.float32)
buf9 = empty_strided_cuda((4, 64, 17, 17), (18496, 1, 1088, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(73984)](buf7, buf8,
buf9, 73984, XBLOCK=512, num_warps=8, num_stages=1)
buf10 = extern_kernels.convolution(buf8, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 128, 15, 15), (28800, 1, 1920, 128))
buf11 = empty_strided_cuda((4, 128, 1, 1, 2), (256, 1, 1024, 1024,
128), torch.float32)
triton_red_fused_convolution_mean_relu_6[grid(1024)](buf10,
primals_7, buf11, 1024, 113, XBLOCK=64, RBLOCK=8, num_warps=4,
num_stages=1)
buf12 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 512, 512),
torch.float32)
buf13 = buf12
del buf12
triton_per_fused_convolution_mean_relu_7[grid(512)](buf13, buf11,
512, 2, XBLOCK=256, num_warps=4, num_stages=1)
del buf11
buf14 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf13, (4, 128),
(128, 1), 0), reinterpret_tensor(primals_8, (128, 10), (1, 128),
0), alpha=1, beta=1, out=buf14)
del primals_9
buf15 = empty_strided_cuda((4, 128, 15, 15), (28800, 1, 1920, 128),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(115200)](
buf10, primals_7, buf15, 115200, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf10
del primals_7
buf16 = empty_strided_cuda((4, 64, 30, 30), (57600, 1, 1920, 64),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_9[grid(230400)](
buf6, primals_5, buf16, 230400, XBLOCK=512, num_warps=8,
num_stages=1)
del buf6
del primals_5
buf17 = empty_strided_cuda((4, 32, 61, 61), (119072, 1, 1952, 32),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_10[grid(128, 3721)
](buf2, primals_2, buf17, 128, 3721, XBLOCK=8, YBLOCK=128,
num_warps=4, num_stages=1)
del buf2
del primals_2
return (buf14, primals_1, primals_3, buf0, buf1, buf3, buf4, buf5, buf7,
buf8, buf9, reinterpret_tensor(buf13, (4, 128), (128, 1), 0),
primals_8, buf15, buf16, buf17)
class CAMMNISTExtendedClassifierNew(nn.Module):
def __init__(self):
super(CAMMNISTExtendedClassifierNew, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=4)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
self.relu3 = nn.ReLU()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(128, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc.weight
primals_9 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
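# NOTE: the *New wrapper keeps the original module interface but routes
# forward() through the compiled call(): parameters are gathered as primals
# and output[0] (the logits, buf14) is returned; the remaining outputs are
# tensors saved for backward.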
| RobinMaas95/GTSRB_Visualization | CAMMNISTExtendedClassifier | false | 1,017 | [
"MIT"
] | 0 | fa837ff94e089a936ef4f4418970d262b35f70b6 | https://github.com/RobinMaas95/GTSRB_Visualization/tree/fa837ff94e089a936ef4f4418970d262b35f70b6 | import torch
from torch import nn as nn
from torch import optim as optim
from torchvision import transforms as transforms
from torch.nn.functional import pad
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=4)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
self.relu3 = nn.ReLU()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(128, 10)
def forward(self, x):
x = self.pool1(pad(self.relu1(self.conv1(x)), [2, 2, 2, 2]))
x = self.pool2(pad(self.relu2(self.conv2(x)), [2, 2, 2, 2]))
x = self.relu3(self.conv3(x))
x = self.avgpool(x)
x = x.view(x.shape[0], -1)
return self.fc(x)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
Model | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/mb/cmb72vxh36b4k6lvmt4562lj3nrqtpyzst2qbon2yqx22gdjfa7x.py
# Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# y_pred => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mm,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
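# NOTE: an in-place sigmoid over the 4x4 matmul result, the only fusion
# opportunity in this tiny graph. Rough eager equivalent (hedged):
#   torch.sigmoid(x.mm(w1))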
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, 16, grid=grid(16), stream=stream0)
return (buf1, buf1, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), )
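# NOTE (hedged): buf1 appears twice in the return because sigmoid's backward
# is computed from its own output, and the transposed view of primals_1 is
# saved for the matmul's backward.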
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class Model(torch.nn.Module):
def __init__(self, D_in, D_out):
super(Model, self).__init__()
self.w1 = torch.nn.Parameter(torch.randn(D_in, D_out),
requires_grad=True)
self.sig = torch.nn.Sigmoid()
def forward(self, x):
y_pred = self.sig(x.mm(self.w1))
return y_pred
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'D_in': 4, 'D_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(16)](buf1, 16, XBLOCK=16, num_warps
=1, num_stages=1)
return buf1, buf1, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0)
class ModelNew(torch.nn.Module):
def __init__(self, D_in, D_out):
super(ModelNew, self).__init__()
self.w1 = torch.nn.Parameter(torch.randn(D_in, D_out),
requires_grad=True)
self.sig = torch.nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.w1
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| Saran-nns/gradients | Model | false | 1,018 | [
"MIT"
] | 0 | 67f9ff92589047828563dbbe30f225dca7ad47fd | https://github.com/Saran-nns/gradients/tree/67f9ff92589047828563dbbe30f225dca7ad47fd | import torch
class Model(torch.nn.Module):
def __init__(self, D_in, D_out):
super(Model, self).__init__()
self.w1 = torch.nn.Parameter(torch.randn(D_in, D_out),
requires_grad=True)
self.sig = torch.nn.Sigmoid()
def forward(self, x):
y_pred = self.sig(x.mm(self.w1))
return y_pred
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
DisentangledAELatent | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/q3/cq354n2touiebe3vjnslfwtenbjmnauh5czsdjtbvv5wx64dfalj.py
# Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# std => exp
# z => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.5), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%normal_functional, %exp), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %getitem), kwargs = {})
triton_poi_fused_add_exp_mul_0 = async_compile.triton('triton_poi_fused_add_exp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (64 + x0 + (128*x1)), xmask)
tmp6 = tl.load(in_ptr1 + (x0 + (128*x1)), xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp0 * tmp4
tmp7 = tmp5 + tmp6
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
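# Added commentary: in_ptr1 is the (64, 8) latent output; the offsets
# x0 + 128 * x1 and 64 + x0 + 128 * x1 address the mu and logvar halves of
# torch.chunk(z_variables, 2, dim=1) without materializing separate tensors.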
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [z_variables], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 2, 4, 8), (64, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [eps], Original ATen: [aten.normal_functional]
buf2 = torch.ops.aten.normal_functional.default(buf1)
buf3 = buf2
del buf2
buf4 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mul, std, mul_1, z], Original ATen: [aten.mul, aten.exp, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_exp_mul_0.run(buf3, buf0, buf4, 256, grid=grid(256), stream=stream0)
return (buf4, reinterpret_tensor(buf0, (4, 2, 4, 8), (128, 32, 8, 1), 0), reinterpret_tensor(buf0, (4, 2, 4, 8), (128, 32, 8, 1), 64), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 2, 4, 8), (128, 32, 8, 1), 64), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class DisentangledAELatent(torch.nn.Module):
"""Dense Dientangled Latent Layer between encoder and decoder"""
def __init__(self, hidden_size: 'int', latent_size: 'int', dropout: 'float'
):
super(DisentangledAELatent, self).__init__()
self.latent_size = latent_size
self.hidden_size = hidden_size
self.dropout = dropout
self.latent = torch.nn.Linear(self.hidden_size, self.latent_size * 2)
@staticmethod
def reparameterize(mu, logvar, training=True):
if training:
std = logvar.mul(0.5).exp_()
eps = std.data.new(std.size()).normal_()
return eps.mul(std).add_(mu)
return mu
def forward(self, x, training=True):
z_variables = self.latent(x)
mu, logvar = torch.chunk(z_variables, 2, dim=1)
z = self.reparameterize(mu, logvar, training=training)
return z, mu, logvar
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'latent_size': 4, 'dropout': 0.5}]
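# Added commentary (illustrative sketch, not from the original repo): the
# reparameterization above draws z = mu + eps * exp(0.5 * logvar) with
# eps ~ N(0, I), so gradients flow through mu and logvar while the
# randomness stays in eps. Quick statistical check with unit variance:
if __name__ == '__main__':
    mu, logvar = torch.zeros(10000), torch.zeros(10000)
    z = DisentangledAELatent.reparameterize(mu, logvar)
    print(z.mean().item(), z.std().item())  # roughly 0 and 1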
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_exp_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + (64 + x0 + 128 * x1), xmask)
tmp6 = tl.load(in_ptr1 + (x0 + 128 * x1), xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp0 * tmp4
tmp7 = tmp5 + tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 2, 4, 8), (64, 32, 8, 1), torch.float32)
buf2 = torch.ops.aten.normal_functional.default(buf1)
buf3 = buf2
del buf2
buf4 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_exp_mul_0[grid(256)](buf3, buf0, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf4, reinterpret_tensor(buf0, (4, 2, 4, 8), (128, 32, 8, 1), 0
), reinterpret_tensor(buf0, (4, 2, 4, 8), (128, 32, 8, 1), 64
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf0, (4, 2, 4, 8), (128, 32, 8, 1), 64), buf3
class DisentangledAELatentNew(torch.nn.Module):
"""Dense Dientangled Latent Layer between encoder and decoder"""
def __init__(self, hidden_size: 'int', latent_size: 'int', dropout: 'float'
):
super(DisentangledAELatentNew, self).__init__()
self.latent_size = latent_size
self.hidden_size = hidden_size
self.dropout = dropout
self.latent = torch.nn.Linear(self.hidden_size, self.latent_size * 2)
@staticmethod
def reparameterize(mu, logvar, training=True):
if training:
std = logvar.mul(0.5).exp_()
eps = std.data.new(std.size()).normal_()
return eps.mul(std).add_(mu)
return mu
def forward(self, input_0):
primals_1 = self.latent.weight
primals_2 = self.latent.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1], output[2]
| Saran-nns/traja | DisentangledAELatent | false | 1,019 | [
"MIT"
] | 0 | f2256cc47abd33377b3a87f110f4c8da1cf6765f | https://github.com/Saran-nns/traja/tree/f2256cc47abd33377b3a87f110f4c8da1cf6765f | import torch
class Model(torch.nn.Module):
"""Dense Dientangled Latent Layer between encoder and decoder"""
def __init__(self, hidden_size: 'int', latent_size: 'int', dropout: 'float'
):
super().__init__()
self.latent_size = latent_size
self.hidden_size = hidden_size
self.dropout = dropout
self.latent = torch.nn.Linear(self.hidden_size, self.latent_size * 2)
@staticmethod
def reparameterize(mu, logvar, training=True):
if training:
std = logvar.mul(0.5).exp_()
eps = std.data.new(std.size()).normal_()
return eps.mul(std).add_(mu)
return mu
def forward(self, x, training=True):
z_variables = self.latent(x)
mu, logvar = torch.chunk(z_variables, 2, dim=1)
z = self.reparameterize(mu, logvar, training=training)
return z, mu, logvar
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 0.5]
|
LayerCake | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ai/caitnpldotnv4k4oj67wyecd2ig4qcjrnmr35rmx6o2vxx245xs3.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clamp, aten.ge]
# Source node to ATen node mapping:
# x => clamp_min
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_1, 0), kwargs = {})
# %ge_4 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_1, 0), kwargs = {})
triton_poi_fused_clamp_ge_0 = async_compile.triton('triton_poi_fused_clamp_ge_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp2 >= tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp5, xmask)
''', device_str='cuda')
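# Added commentary: each fused clamp/ge launch writes two buffers -- the
# clamped activations for the forward pass and a boolean x >= 0 mask that
# the backward pass reuses as the ReLU-style gradient gate.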
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clamp, aten.ge]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_ge_0.run(buf0, primals_2, buf1, buf15, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clamp, aten.ge]
triton_poi_fused_clamp_ge_0.run(buf2, primals_5, buf3, buf14, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clamp, aten.ge]
triton_poi_fused_clamp_ge_0.run(buf4, primals_7, buf5, buf13, 256, grid=grid(256), stream=stream0)
del primals_7
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clamp, aten.ge]
triton_poi_fused_clamp_ge_0.run(buf6, primals_9, buf7, buf12, 256, grid=grid(256), stream=stream0)
del primals_9
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.clamp, aten.ge]
triton_poi_fused_clamp_ge_0.run(buf8, primals_11, buf9, buf11, 256, grid=grid(256), stream=stream0)
del primals_11
buf10 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf10)
del primals_13
return (reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(buf9, (64, 4), (4, 1), 0), primals_12, buf11, primals_10, buf12, primals_8, buf13, primals_6, buf14, primals_4, buf15, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class LayerCake(torch.nn.Module):
def __init__(self, D_in, H1, H2, H3, H4, H5, D_out):
"""
In the constructor we instantiate two nn.Linear modules and assign them as
member variables.
"""
super(LayerCake, self).__init__()
self.linear1 = torch.nn.Linear(D_in, H1)
self.linear2 = torch.nn.Linear(H1, H2)
self.linear3 = torch.nn.Linear(H2, H3)
self.linear4 = torch.nn.Linear(H3, H4)
self.linear5 = torch.nn.Linear(H4, H5)
self.linear6 = torch.nn.Linear(H5, D_out)
def forward(self, x):
"""
In the forward function we accept a Tensor of input data and we must return
a Tensor of output data. We can use Modules defined in the constructor as
well as arbitrary (differentiable) operations on Tensors.
"""
x = self.linear1(x).clamp(min=0)
x = self.linear2(x).clamp(min=0)
x = self.linear3(x).clamp(min=0)
x = self.linear4(x).clamp(min=0)
x = self.linear5(x).clamp(min=0)
y_pred = self.linear6(x)
return y_pred
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'D_in': 4, 'H1': 4, 'H2': 4, 'H3': 4, 'H4': 4, 'H5': 4,
'D_out': 4}]
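# Added commentary (illustrative, not from the original repo):
# .clamp(min=0) is numerically identical to ReLU, which is why the
# compiled graph fuses each bias-add and clamp into one pointwise kernel.
if __name__ == '__main__':
    t = torch.randn(8)
    assert torch.equal(t.clamp(min=0), torch.relu(t))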
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tmp2 >= tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_clamp_ge_0[grid(256)](buf0, primals_2, buf1, buf15,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_clamp_ge_0[grid(256)](buf2, primals_5, buf3, buf14,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = buf2
del buf2
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_clamp_ge_0[grid(256)](buf4, primals_7, buf5, buf13,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = buf4
del buf4
extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_clamp_ge_0[grid(256)](buf6, primals_9, buf7, buf12,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf8 = buf6
del buf6
extern_kernels.mm(reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf8)
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_clamp_ge_0[grid(256)](buf8, primals_11, buf9,
buf11, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf10 = buf8
del buf8
extern_kernels.addmm(primals_13, reinterpret_tensor(buf9, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf10)
del primals_13
return (reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1),
0), reinterpret_tensor(buf7, (64, 4), (4, 1), 0),
reinterpret_tensor(buf9, (64, 4), (4, 1), 0), primals_12, buf11,
primals_10, buf12, primals_8, buf13, primals_6, buf14, primals_4, buf15
)
class LayerCakeNew(torch.nn.Module):
def __init__(self, D_in, H1, H2, H3, H4, H5, D_out):
"""
        In the constructor we instantiate six nn.Linear modules and assign them as
member variables.
"""
super(LayerCakeNew, self).__init__()
self.linear1 = torch.nn.Linear(D_in, H1)
self.linear2 = torch.nn.Linear(H1, H2)
self.linear3 = torch.nn.Linear(H2, H3)
self.linear4 = torch.nn.Linear(H3, H4)
self.linear5 = torch.nn.Linear(H4, H5)
self.linear6 = torch.nn.Linear(H5, D_out)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_8 = self.linear4.weight
primals_9 = self.linear4.bias
primals_10 = self.linear5.weight
primals_11 = self.linear5.bias
primals_12 = self.linear6.weight
primals_13 = self.linear6.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| Saran-nns/delve | LayerCake | false | 1,021 | [
"MIT"
] | 0 | 3489d8aa13181b392d3c47a19f9d9a47d87f8790 | https://github.com/Saran-nns/delve/tree/3489d8aa13181b392d3c47a19f9d9a47d87f8790 | import torch
class Model(torch.nn.Module):
def __init__(self, D_in, H1, H2, H3, H4, H5, D_out):
"""
In the constructor we instantiate two nn.Linear modules and assign them as
member variables.
"""
super().__init__()
self.linear1 = torch.nn.Linear(D_in, H1)
self.linear2 = torch.nn.Linear(H1, H2)
self.linear3 = torch.nn.Linear(H2, H3)
self.linear4 = torch.nn.Linear(H3, H4)
self.linear5 = torch.nn.Linear(H4, H5)
self.linear6 = torch.nn.Linear(H5, D_out)
def forward(self, x):
"""
In the forward function we accept a Tensor of input data and we must return
a Tensor of output data. We can use Modules defined in the constructor as
well as arbitrary (differentiable) operations on Tensors.
"""
x = self.linear1(x).clamp(min=0)
x = self.linear2(x).clamp(min=0)
x = self.linear3(x).clamp(min=0)
x = self.linear4(x).clamp(min=0)
x = self.linear5(x).clamp(min=0)
y_pred = self.linear6(x)
return y_pred
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'D_in': 4, 'H1': 4, 'H2': 4, 'H3': 4, 'H4': 4, 'H5': 4,
'D_out': 4}]
|
GCN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/be/cbej2f3myglhqo2dienhyo4fp7tbscq32k7imbgc2psgl6gaxxhi.py
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.relu]
# Source node to ATen node mapping:
# add => add
# x => relu
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %primals_4), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
triton_poi_fused_add_relu_0 = async_compile.triton('triton_poi_fused_add_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ul/culvxc5xcnacfjypzxghwcyc2445sqsz25ci4rib6axjxs3fv3so.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_default, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_default, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/yr/cyr6fatjcqc5np3quy6arljtkkff4qjmueyb5b4pk5xvkxgrzuvd.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [add, x], Original ATen: [aten.add, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_add_relu_0.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
del primals_4
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [support_1], Original ATen: [aten.mm]
extern_kernels.mm(buf2, primals_5, out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.addmm(primals_6, primals_3, buf3, alpha=1, beta=1, out=buf4)
del primals_6
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
del buf5
return (buf6, buf4, buf2, buf6, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import math
import torch
import torch.nn as nn
import torch.utils.data
import torch
from torch.nn.modules.module import Module
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class GCN_Spectral(Module):
""" Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """
def __init__(self, in_units: 'int', out_units: 'int', bias: 'bool'=True
) ->None:
super(GCN_Spectral, self).__init__()
self.in_units = in_units
self.out_units = out_units
self.weight = Parameter(torch.FloatTensor(in_units, out_units))
if bias:
self.bias = Parameter(torch.FloatTensor(out_units))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input: 'torch.Tensor', adj: 'torch.Tensor'
) ->torch.Tensor:
"""
weight=(input_dim X hid_dim)
:param input: (#samples X input_dim)
        :param adj: adjacency matrix, (#samples X #samples)
        :return: output features, (#samples X hid_dim)
"""
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_units
) + ' -> ' + str(self.out_units) + ')'
class GCN(nn.Module):
def __init__(self, nfeat: 'int', nhid: 'int', nclass: 'int', dropout:
'float') ->None:
super(GCN, self).__init__()
self.gc1 = GCN_Spectral(nfeat, nhid)
self.gc2 = GCN_Spectral(nhid, nfeat)
self.dropout = dropout
def forward(self, x: 'torch.Tensor', adj: 'torch.Tensor') ->(torch.
Tensor, torch.Tensor):
x = F.relu(self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc2(x, adj)
return F.log_softmax(x, dim=1), x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
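# Added commentary (assumption, not from the original repo): spectral GCN
# layers of this form are normally driven with the renormalized adjacency
# A_hat = D^{-1/2} (A + I) D^{-1/2} from Kipf & Welling (2017). A dense
# sketch of building it for the toy 4x4 case:
if __name__ == '__main__':
    A = torch.rand(4, 4) + torch.eye(4)
    d_inv_sqrt = A.sum(dim=1).pow(-0.5)
    A_hat = d_inv_sqrt.unsqueeze(1) * A * d_inv_sqrt.unsqueeze(0)
    log_probs, logits = GCN(4, 4, 4, 0.5)(torch.rand(4, 4), A_hat)
    print(log_probs.exp().sum(dim=1))  # each row sums to ~1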
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
import math
import torch.nn as nn
import torch.utils.data
import torch
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
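# Added commentary: the two kernels above form a numerically stable
# log-softmax in two passes -- the first subtracts the per-row max, the
# second computes x - log(sum(exp(x))) on the shifted values, matching
# log_softmax(x) = (x - max) - log(sum(exp(x - max))).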
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = buf1
del buf1
get_raw_stream(0)
        triton_poi_fused_add_relu_0[grid(16)](buf2, primals_4, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
del primals_4
buf3 = buf0
del buf0
extern_kernels.mm(buf2, primals_5, out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, primals_3, buf3, alpha=1, beta=1,
out=buf4)
del primals_6
buf5 = buf3
del buf3
triton_poi_fused__log_softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf5
return buf6, buf4, buf2, buf6, reinterpret_tensor(primals_3, (4, 4), (1,
4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)
class GCN_Spectral(Module):
""" Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """
def __init__(self, in_units: 'int', out_units: 'int', bias: 'bool'=True
) ->None:
super(GCN_Spectral, self).__init__()
self.in_units = in_units
self.out_units = out_units
self.weight = Parameter(torch.FloatTensor(in_units, out_units))
if bias:
self.bias = Parameter(torch.FloatTensor(out_units))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input: 'torch.Tensor', adj: 'torch.Tensor'
) ->torch.Tensor:
"""
weight=(input_dim X hid_dim)
:param input: (#samples X input_dim)
        :param adj: adjacency matrix, (#samples X #samples)
        :return: output features, (#samples X hid_dim)
"""
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_units
) + ' -> ' + str(self.out_units) + ')'
class GCNNew(nn.Module):
def __init__(self, nfeat: 'int', nhid: 'int', nclass: 'int', dropout:
'float') ->None:
super(GCNNew, self).__init__()
self.gc1 = GCN_Spectral(nfeat, nhid)
self.gc2 = GCN_Spectral(nhid, nfeat)
self.dropout = dropout
def forward(self, input_0, input_1):
primals_1 = self.gc1.weight
primals_4 = self.gc1.bias
primals_2 = self.gc2.weight
primals_6 = self.gc2.bias
primals_3 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
| SamujjwalSam/XC_GCN | GCN | false | 1,022 | [
"MIT"
] | 0 | 7902cbd6b3ebc7806655080979e8c52caa4a16e0 | https://github.com/SamujjwalSam/XC_GCN/tree/7902cbd6b3ebc7806655080979e8c52caa4a16e0 | from torch.nn import Module
import math
import torch
import torch.nn as nn
import torch.utils.data
import torch
from torch.nn.modules.module import Module
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class GCN_Spectral(Module):
""" Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """
def __init__(self, in_units: 'int', out_units: 'int', bias: 'bool'=True
) ->None:
super().__init__()
self.in_units = in_units
self.out_units = out_units
self.weight = Parameter(torch.FloatTensor(in_units, out_units))
if bias:
self.bias = Parameter(torch.FloatTensor(out_units))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) ->None:
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input: 'torch.Tensor', adj: 'torch.Tensor'
) ->torch.Tensor:
"""
weight=(input_dim X hid_dim)
:param input: (#samples X input_dim)
        :param adj: adjacency matrix, (#samples X #samples)
        :return: output features, (#samples X hid_dim)
"""
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_units
) + ' -> ' + str(self.out_units) + ')'
class Model(nn.Module):
def __init__(self, nfeat: 'int', nhid: 'int', nclass: 'int', dropout:
'float') ->None:
super().__init__()
self.gc1 = GCN_Spectral(nfeat, nhid)
self.gc2 = GCN_Spectral(nhid, nfeat)
self.dropout = dropout
def forward(self, x: 'torch.Tensor', adj: 'torch.Tensor') ->(torch.
Tensor, torch.Tensor):
x = F.relu(self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc2(x, adj)
return F.log_softmax(x, dim=1), x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4, 0.5]
|
ReOrgLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/md/cmdvvkvftnyzlmlhenb5cojabxyds4oikrxtdom7hg54fjve7bri.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_3 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 2
x3 = (xindex // 2)
y0 = yindex % 4
y1 = (yindex // 4)
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + ((2*x2) + (4*(y0 // 2)) + (8*x3) + (64*y1) + (y0 % 2)), xmask & ymask)
tl.store(out_ptr0 + (x5 + (16*y4)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2, 2), (64, 16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 16, 16, grid=grid(16, 16), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 16, 2, 2), (64, 4, 2, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch._utils
class ReOrgLayer(nn.Module):
def __init__(self, stride=2):
super(ReOrgLayer, self).__init__()
self.stride = stride
def forward(self, x):
assert x.data.dim() == 4
B, C, H, W = x.data.shape
hs = self.stride
ws = self.stride
assert H % hs == 0, 'The stride ' + str(self.stride
) + ' is not a proper divisor of height ' + str(H)
        assert W % ws == 0, 'The stride ' + str(self.stride
            ) + ' is not a proper divisor of width ' + str(W)
x = x.view(B, C, H // hs, hs, W // ws, ws).transpose(-2, -3
).contiguous()
x = x.view(B, C, H // hs * W // ws, hs, ws)
x = x.view(B, C, H // hs * W // ws, hs * ws).transpose(-1, -2
).contiguous()
x = x.view(B, C, ws * hs, H // ws, W // ws).transpose(1, 2).contiguous(
)
x = x.view(B, C * ws * hs, H // ws, W // ws)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
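# Added commentary (illustrative, not from the original repo): with
# stride 2 this is a space-to-depth rearrangement, (B, C, H, W) ->
# (B, 4C, H/2, W/2), preserving the element count.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4)
    y = ReOrgLayer(stride=2)(x)
    assert y.shape == (4, 16, 2, 2) and y.numel() == x.numel()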
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch._utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 2
x3 = xindex // 2
y0 = yindex % 4
y1 = yindex // 4
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (2 * x2 + 4 * (y0 // 2) + 8 * x3 + 64 * y1 +
y0 % 2), xmask & ymask)
tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)
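# Added commentary: the load offset 2*x2 + 4*(y0 // 2) + 8*x3 + 64*y1 + y0 % 2
# gathers the stride-2 space-to-depth pattern directly, so the whole reorg
# reduces to one strided copy plus a zero-cost stride reinterpretation.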
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 2, 2), (64, 16, 4, 2, 1),
            torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 16)](arg0_1, buf0, 16, 16, XBLOCK
=16, YBLOCK=16, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 16, 2, 2), (64, 4, 2, 1), 0),
class ReOrgLayerNew(nn.Module):
def __init__(self, stride=2):
super(ReOrgLayerNew, self).__init__()
self.stride = stride
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Sarathismg/Pose-Estimator-Old-Version | ReOrgLayer | false | 1,023 | [
"Apache-2.0"
] | 0 | ecaa03769323b94a4d7222e2d3606d1ce92a2fae | https://github.com/Sarathismg/Pose-Estimator-Old-Version/tree/ecaa03769323b94a4d7222e2d3606d1ce92a2fae | import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch._utils
class Model(nn.Module):
def __init__(self, stride=2):
super().__init__()
self.stride = stride
def forward(self, x):
assert x.data.dim() == 4
B, C, H, W = x.data.shape
hs = self.stride
ws = self.stride
        assert H % hs == 0, ('The stride ' + str(self.stride) +
            ' is not a proper divisor of height ' + str(H))
        assert W % ws == 0, ('The stride ' + str(self.stride) +
            ' is not a proper divisor of width ' + str(W))
x = x.view(B, C, H // hs, hs, W // ws, ws).transpose(-2, -3
).contiguous()
x = x.view(B, C, H // hs * W // ws, hs, ws)
x = x.view(B, C, H // hs * W // ws, hs * ws).transpose(-1, -2
).contiguous()
x = x.view(B, C, ws * hs, H // ws, W // ws).transpose(1, 2).contiguous(
)
x = x.view(B, C * ws * hs, H // ws, W // ws)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
TracedModule | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/yx/cyxpk7a4eq5vq4bzeif2nk6cwpcgf7ixzqxdcgvbuuwnhguxpc26.py
# Topologically Sorted Source Nodes: [sqrt, truediv, floor], Original ATen: [aten.sqrt, aten.div, aten.floor]
# Source node to ATen node mapping:
# floor => floor
# sqrt => sqrt
# truediv => div
# Graph fragment:
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%arg0_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sqrt, 5.0), kwargs = {})
# %floor : [num_users=1] = call_function[target=torch.ops.aten.floor.default](args = (%div,), kwargs = {})
triton_poi_fused_div_floor_sqrt_0 = async_compile.triton('triton_poi_fused_div_floor_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_floor_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_floor_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.sqrt(tmp0)
tmp2 = 0.2
tmp3 = tmp1 * tmp2
tmp4 = libdevice.floor(tmp3)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sqrt, truediv, floor], Original ATen: [aten.sqrt, aten.div, aten.floor]
stream0 = get_raw_stream(0)
triton_poi_fused_div_floor_sqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.autograd
class TracedModule(torch.nn.Module):
def forward(self, x):
x = x.type(torch.float32)
return torch.floor(torch.sqrt(x) / 5.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
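# Aside: a short, illustrative check (not part of the original file; the demo
# code below is ours). The module is trace-friendly: torch.jit.trace records
# the sqrt -> div -> floor chain that the fused Inductor kernel above executes
# in a single pass (with the division by 5.0 strength-reduced by the compiler
# to a multiplication by 0.2).
if __name__ == '__main__':
    m = TracedModule()
    traced = torch.jit.trace(m, torch.rand(4, 4, 4, 4))
    x = torch.rand(4, 4, 4, 4)
    assert torch.equal(traced(x), torch.floor(torch.sqrt(x) / 5.0))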
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_floor_sqrt_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.sqrt(tmp0)
tmp2 = 0.2
tmp3 = tmp1 * tmp2
tmp4 = libdevice.floor(tmp3)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_floor_sqrt_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class TracedModuleNew(torch.nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ScorpioDoctor/antares02 | TracedModule | false | 1,024 | [
"BSD-3-Clause"
] | 0 | 631b817d2e98f351d1173b620d15c4a5efed11da | https://github.com/ScorpioDoctor/antares02/tree/631b817d2e98f351d1173b620d15c4a5efed11da | import torch
import torch.onnx
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.autograd
class Model(torch.nn.Module):
def forward(self, x):
x = x.type(torch.float32)
return torch.floor(torch.sqrt(x) / 5.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SRCNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/7a/c7a2sqxnc6bi7sq5fihvseqxlvh33ljnmvvaziqhjhuxequqirct.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.floor, aten.clamp, aten.rsub, aten._unsafe_index]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1, _unsafe_index_10, _unsafe_index_11, _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, _unsafe_index_2, _unsafe_index_3, _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, _unsafe_index_8, _unsafe_index_9, add, add_10, add_11, add_12, add_13, add_14, add_15, add_16, add_17, add_18, add_19, add_20, add_21, add_22, add_23, add_24, add_25, add_26, add_27, add_28, add_29, add_30, add_6, add_7, add_8, add_9, clamp_max, clamp_max_1, clamp_min, clamp_min_1, convert_element_type, floor, floor_1, iota, mul, mul_10, mul_11, mul_12, mul_13, mul_14, mul_15, mul_16, mul_17, mul_18, mul_19, mul_2, mul_20, mul_21, mul_22, mul_23, mul_24, mul_25, mul_26, mul_27, mul_28, mul_29, mul_3, mul_30, mul_31, mul_32, mul_33, mul_34, mul_35, mul_36, mul_37, mul_38, mul_39, mul_4, mul_40, mul_41, mul_42, mul_43, mul_44, mul_45, mul_5, mul_6, mul_7, mul_8, mul_9, sub, sub_10, sub_11, sub_12, sub_13, sub_14, sub_15, sub_16, sub_17, sub_18, sub_19, sub_2, sub_20, sub_21, sub_3, sub_6, sub_7, sub_8, sub_9
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.25), kwargs = {})
# %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {})
# %floor : [num_users=2] = call_function[target=torch.ops.aten.floor.default](args = (%sub,), kwargs = {})
# %floor_1 : [num_users=2] = call_function[target=torch.ops.aten.floor.default](args = (%unsqueeze,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %floor_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max : [num_users=6] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.0), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %floor), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
# %clamp_max_1 : [num_users=6] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1.0), kwargs = {})
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_max_1, 1.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, -0.75), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, -3.75), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %add_6), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, -6.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, %add_6), kwargs = {})
# %sub_7 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, -3.0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max_1, 1.25), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, 2.25), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %clamp_max_1), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %clamp_max_1), kwargs = {})
# %add_8 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, 1), kwargs = {})
# %sub_9 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %clamp_max_1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, 1.25), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_8, 2.25), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %sub_9), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_9, %sub_9), kwargs = {})
# %add_9 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_10, 1), kwargs = {})
# %sub_11 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.0, %clamp_max_1), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, -0.75), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_11, -3.75), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %sub_11), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_12, -6.0), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_10, %sub_11), kwargs = {})
# %sub_13 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_13, -3.0), kwargs = {})
# %add_11 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_max, 1.0), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_11, -0.75), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_14, -3.75), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %add_11), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_15, -6.0), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_12, %add_11), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_16, -3.0), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, 1.25), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_17, 2.25), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %clamp_max), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_18, %clamp_max), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_19, 1), kwargs = {})
# %sub_17 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %clamp_max), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, 1.25), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_20, 2.25), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %sub_17), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_21, %sub_17), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_22, 1), kwargs = {})
# %sub_19 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.0, %clamp_max), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, -0.75), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_23, -3.75), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %sub_19), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_24, -6.0), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_15, %sub_19), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_25, -3.0), kwargs = {})
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_2, %clamp_max_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_2, %clamp_max_5]), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_2, %clamp_max_7]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_2, %clamp_max_9]), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index, %sub_7), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_1, %add_8), kwargs = {})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_26, %mul_27), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_2, %add_9), kwargs = {})
# %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_16, %mul_28), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_3, %sub_13), kwargs = {})
# %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_17, %mul_29), kwargs = {})
# %_unsafe_index_4 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_10, %clamp_max_3]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_10, %clamp_max_5]), kwargs = {})
# %_unsafe_index_6 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_10, %clamp_max_7]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_10, %clamp_max_9]), kwargs = {})
# %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_4, %sub_7), kwargs = {})
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_5, %add_8), kwargs = {})
# %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_30, %mul_31), kwargs = {})
# %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_6, %add_9), kwargs = {})
# %add_20 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_19, %mul_32), kwargs = {})
# %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_7, %sub_13), kwargs = {})
# %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_20, %mul_33), kwargs = {})
# %_unsafe_index_8 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_18, %clamp_max_3]), kwargs = {})
# %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_18, %clamp_max_5]), kwargs = {})
# %_unsafe_index_10 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_18, %clamp_max_7]), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_18, %clamp_max_9]), kwargs = {})
# %mul_34 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_8, %sub_7), kwargs = {})
# %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_9, %add_8), kwargs = {})
# %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_34, %mul_35), kwargs = {})
# %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_10, %add_9), kwargs = {})
# %add_23 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_22, %mul_36), kwargs = {})
# %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_11, %sub_13), kwargs = {})
# %add_24 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_23, %mul_37), kwargs = {})
# %_unsafe_index_12 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_26, %clamp_max_3]), kwargs = {})
# %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_26, %clamp_max_5]), kwargs = {})
# %_unsafe_index_14 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_26, %clamp_max_7]), kwargs = {})
# %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_26, %clamp_max_9]), kwargs = {})
# %mul_38 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_12, %sub_7), kwargs = {})
# %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_13, %add_8), kwargs = {})
# %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_38, %mul_39), kwargs = {})
# %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_14, %add_9), kwargs = {})
# %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_25, %mul_40), kwargs = {})
# %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_15, %sub_13), kwargs = {})
# %add_27 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_26, %mul_41), kwargs = {})
# %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_18, %sub_15), kwargs = {})
# %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_21, %add_13), kwargs = {})
# %add_28 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_42, %mul_43), kwargs = {})
# %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_24, %add_14), kwargs = {})
# %add_29 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_28, %mul_44), kwargs = {})
# %mul_45 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_27, %sub_21), kwargs = {})
# %add_30 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_29, %mul_45), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0(in_out_ptr1, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 16
x0 = xindex % 16
x2 = (xindex // 256)
x3 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = libdevice.floor(tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 - tmp9
tmp11 = tl.full([1], 0, tl.int64)
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp13 = tl.full([1], 3, tl.int64)
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tmp15 = x0
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp16 + tmp2
tmp18 = tmp17 * tmp4
tmp19 = tmp18 - tmp2
tmp20 = libdevice.floor(tmp19)
tmp21 = tmp20.to(tl.int32)
tmp22 = tmp21 - tmp9
tmp23 = triton_helpers.maximum(tmp22, tmp11)
tmp24 = triton_helpers.minimum(tmp23, tmp13)
tmp25 = tl.load(in_ptr0 + (tmp24 + (4*tmp14) + (16*x2)), xmask, eviction_policy='evict_last')
tmp26 = tmp19 - tmp20
tmp27 = 0.0
tmp28 = triton_helpers.maximum(tmp26, tmp27)
tmp29 = 1.0
tmp30 = triton_helpers.minimum(tmp28, tmp29)
tmp31 = tmp30 + tmp29
tmp32 = -0.75
tmp33 = tmp31 * tmp32
tmp34 = -3.75
tmp35 = tmp33 - tmp34
tmp36 = tmp35 * tmp31
tmp37 = -6.0
tmp38 = tmp36 + tmp37
tmp39 = tmp38 * tmp31
tmp40 = -3.0
tmp41 = tmp39 - tmp40
tmp42 = tmp25 * tmp41
tmp43 = triton_helpers.maximum(tmp21, tmp11)
tmp44 = triton_helpers.minimum(tmp43, tmp13)
tmp45 = tl.load(in_ptr0 + (tmp44 + (4*tmp14) + (16*x2)), xmask, eviction_policy='evict_last')
tmp46 = 1.25
tmp47 = tmp30 * tmp46
tmp48 = 2.25
tmp49 = tmp47 - tmp48
tmp50 = tmp49 * tmp30
tmp51 = tmp50 * tmp30
tmp52 = tmp51 + tmp29
tmp53 = tmp45 * tmp52
tmp54 = tmp21 + tmp9
tmp55 = triton_helpers.maximum(tmp54, tmp11)
tmp56 = triton_helpers.minimum(tmp55, tmp13)
tmp57 = tl.load(in_ptr0 + (tmp56 + (4*tmp14) + (16*x2)), xmask, eviction_policy='evict_last')
tmp58 = tmp29 - tmp30
tmp59 = tmp58 * tmp46
tmp60 = tmp59 - tmp48
tmp61 = tmp60 * tmp58
tmp62 = tmp61 * tmp58
tmp63 = tmp62 + tmp29
tmp64 = tmp57 * tmp63
tmp65 = triton_helpers.maximum(tmp8, tmp11)
tmp66 = triton_helpers.minimum(tmp65, tmp13)
tmp67 = tl.load(in_ptr0 + (tmp24 + (4*tmp66) + (16*x2)), xmask, eviction_policy='evict_last')
tmp68 = tmp67 * tmp41
tmp69 = tl.full([1], 2, tl.int64)
tmp70 = tmp21 + tmp69
tmp71 = triton_helpers.maximum(tmp70, tmp11)
tmp72 = triton_helpers.minimum(tmp71, tmp13)
tmp73 = tl.load(in_ptr0 + (tmp72 + (4*tmp14) + (16*x2)), xmask, eviction_policy='evict_last')
tmp74 = 2.0
tmp75 = tmp74 - tmp30
tmp76 = tmp75 * tmp32
tmp77 = tmp76 - tmp34
tmp78 = tmp77 * tmp75
tmp79 = tmp78 + tmp37
tmp80 = tmp79 * tmp75
tmp81 = tmp80 - tmp40
tmp82 = tmp73 * tmp81
tmp83 = tl.load(in_ptr0 + (tmp44 + (4*tmp66) + (16*x2)), xmask, eviction_policy='evict_last')
tmp84 = tmp83 * tmp52
tmp85 = tl.load(in_ptr0 + (tmp56 + (4*tmp66) + (16*x2)), xmask, eviction_policy='evict_last')
tmp86 = tmp85 * tmp63
tmp87 = tmp8 + tmp9
tmp88 = triton_helpers.maximum(tmp87, tmp11)
tmp89 = triton_helpers.minimum(tmp88, tmp13)
tmp90 = tl.load(in_ptr0 + (tmp24 + (4*tmp89) + (16*x2)), xmask, eviction_policy='evict_last')
tmp91 = tmp90 * tmp41
tmp92 = tl.load(in_ptr0 + (tmp72 + (4*tmp66) + (16*x2)), xmask, eviction_policy='evict_last')
tmp93 = tmp92 * tmp81
tmp94 = tl.load(in_ptr0 + (tmp44 + (4*tmp89) + (16*x2)), xmask, eviction_policy='evict_last')
tmp95 = tmp94 * tmp52
tmp96 = tl.load(in_ptr0 + (tmp56 + (4*tmp89) + (16*x2)), xmask, eviction_policy='evict_last')
tmp97 = tmp96 * tmp63
tmp98 = tmp8 + tmp69
tmp99 = triton_helpers.maximum(tmp98, tmp11)
tmp100 = triton_helpers.minimum(tmp99, tmp13)
tmp101 = tl.load(in_ptr0 + (tmp24 + (4*tmp100) + (16*x2)), xmask, eviction_policy='evict_last')
tmp102 = tmp101 * tmp41
tmp103 = tl.load(in_ptr0 + (tmp72 + (4*tmp89) + (16*x2)), xmask, eviction_policy='evict_last')
tmp104 = tmp103 * tmp81
tmp105 = tl.load(in_ptr0 + (tmp44 + (4*tmp100) + (16*x2)), xmask, eviction_policy='evict_last')
tmp106 = tmp105 * tmp52
tmp107 = tl.load(in_ptr0 + (tmp56 + (4*tmp100) + (16*x2)), xmask, eviction_policy='evict_last')
tmp108 = tmp107 * tmp63
tmp109 = tl.load(in_ptr0 + (tmp72 + (4*tmp100) + (16*x2)), xmask, eviction_policy='evict_last')
tmp110 = tmp109 * tmp81
tmp111 = tmp42 + tmp53
tmp112 = tmp111 + tmp64
tmp113 = tmp112 + tmp82
tmp114 = tmp6 - tmp7
tmp115 = triton_helpers.maximum(tmp114, tmp27)
tmp116 = triton_helpers.minimum(tmp115, tmp29)
tmp117 = tmp116 + tmp29
tmp118 = tmp117 * tmp32
tmp119 = tmp118 - tmp34
tmp120 = tmp119 * tmp117
tmp121 = tmp120 + tmp37
tmp122 = tmp121 * tmp117
tmp123 = tmp122 - tmp40
tmp124 = tmp113 * tmp123
tmp125 = tmp68 + tmp84
tmp126 = tmp125 + tmp86
tmp127 = tmp126 + tmp93
tmp128 = tmp116 * tmp46
tmp129 = tmp128 - tmp48
tmp130 = tmp129 * tmp116
tmp131 = tmp130 * tmp116
tmp132 = tmp131 + tmp29
tmp133 = tmp127 * tmp132
tmp134 = tmp124 + tmp133
tmp135 = tmp91 + tmp95
tmp136 = tmp135 + tmp97
tmp137 = tmp136 + tmp104
tmp138 = tmp29 - tmp116
tmp139 = tmp138 * tmp46
tmp140 = tmp139 - tmp48
tmp141 = tmp140 * tmp138
tmp142 = tmp141 * tmp138
tmp143 = tmp142 + tmp29
tmp144 = tmp137 * tmp143
tmp145 = tmp134 + tmp144
tmp146 = tmp102 + tmp106
tmp147 = tmp146 + tmp108
tmp148 = tmp147 + tmp110
tmp149 = tmp74 - tmp116
tmp150 = tmp149 * tmp32
tmp151 = tmp150 - tmp34
tmp152 = tmp151 * tmp149
tmp153 = tmp152 + tmp37
tmp154 = tmp153 * tmp149
tmp155 = tmp154 - tmp40
tmp156 = tmp148 * tmp155
tmp157 = tmp145 + tmp156
tl.store(in_out_ptr1 + (x3), tmp157, xmask)
''', device_str='cuda')
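# Aside (illustrative, not compiler output; the helper name bicubic_weight is
# ours): the constants in the kernel above (1.25, 2.25, -0.75, -3.75, -6.0,
# -3.0) are Horner-form coefficients of Keys' cubic convolution kernel with
# a = -0.75, which is what PyTorch uses for mode='bicubic'. A minimal sketch
# of the per-tap weight function:
def bicubic_weight(t, a=-0.75):
    """Keys cubic kernel; t is the tap's distance from the sample point."""
    t = abs(t)
    if t <= 1.0:
        # (a + 2)*t^3 - (a + 3)*t^2 + 1  ->  1.25*t^3 - 2.25*t^2 + 1
        return (a + 2.0) * t ** 3 - (a + 3.0) * t ** 2 + 1.0
    if t < 2.0:
        # a*(t^3 - 5*t^2 + 8*t - 4)  ->  -0.75*t^3 + 3.75*t^2 - 6*t + 3
        return a * t ** 3 - 5.0 * a * t ** 2 + 8.0 * a * t - 4.0 * a
    return 0.0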
# kernel path: runs/run_shard_6/inductor_cache/f4/cf4q74veoggsxdgdkl43ap6cyqfylpfk3qs7wdqoebyfzzb36dvw.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_30, %primals_2, %primals_3, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/bj/cbjysb56yh4ggfzb72c3xdhbbnmqhfc3pvpexw6rfp2nme2jhyyl.py
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# out_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/7x/c7xnwtrjfqhdkxhfsdsjlkr7ml5ojqmtd2lrl7npuiczn7woxe2e.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 256) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (64, 3, 9, 9), (243, 81, 9, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (3, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf10 = empty_strided_cuda((4, 3, 16, 16), (768, 256, 16, 1), torch.float32)
buf18 = buf10; del buf10 # reuse
buf20 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.floor, aten.clamp, aten.rsub, aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0.run(buf20, primals_1, 3072, grid=grid(3072), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf20, primals_2, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 64, 16, 16), (16384, 256, 16, 1))
buf22 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf22, primals_3, 65536, grid=grid(65536), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf22, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 32, 16, 16), (8192, 256, 16, 1))
buf24 = buf23; del buf23 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf24, primals_5, 32768, grid=grid(32768), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf24, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 3, 16, 16), (768, 256, 16, 1))
buf26 = buf25; del buf25 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf26, primals_7, 3072, grid=grid(3072), stream=stream0)
del primals_7
return (buf26, primals_2, primals_4, primals_6, buf20, buf22, buf24, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 3, 9, 9), (243, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((3, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import logging
import torch
import torchvision
import warnings
from collections import OrderedDict
from torch.utils import model_zoo
from torch.nn import functional as F
import torch.nn as nn
import io
import os
import os.path as osp
import pkgutil
from importlib import import_module
# NOTE: the checkpoint helpers below were extracted from mmcv/mmediting, and
# the extraction dropped several of their dependencies. Assuming mmcv 1.x is
# installed, the remaining undefined names resolve roughly as:
#   from mmcv.fileio import FileClient, load as load_file
#   from mmcv.parallel import is_module_wrapper
#   from mmcv.runner import get_dist_info
#   from mmcv.utils import get_logger, mkdir_or_exist
# together with load_pavimodel_dist and the constants ENV_MMCV_HOME,
# ENV_XDG_CACHE_HOME and DEFAULT_CACHE_DIR from mmcv.runner.checkpoint.
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
e.g., "mmedit".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = get_logger(__name__.split('.')[0], log_file, log_level)
return logger
def _get_mmcv_home():
mmcv_home = os.path.expanduser(os.getenv(ENV_MMCV_HOME, os.path.join(os
.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
mkdir_or_exist(mmcv_home)
return mmcv_home
def _process_mmcls_checkpoint(checkpoint):
state_dict = checkpoint['state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k.startswith('backbone.'):
new_state_dict[k[9:]] = v
new_checkpoint = dict(state_dict=new_state_dict)
return new_checkpoint
def get_deprecated_model_names():
deprecate_json_path = osp.join(mmcv.__path__[0],
'model_zoo/deprecated.json')
deprecate_urls = load_file(deprecate_json_path)
assert isinstance(deprecate_urls, dict)
return deprecate_urls
def get_external_models():
mmcv_home = _get_mmcv_home()
default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
default_urls = load_file(default_json_path)
assert isinstance(default_urls, dict)
external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
if osp.exists(external_json_path):
external_urls = load_file(external_json_path)
assert isinstance(external_urls, dict)
default_urls.update(external_urls)
return default_urls
def get_mmcls_models():
mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
mmcls_urls = load_file(mmcls_json_path)
return mmcls_urls
def get_torchvision_models():
model_urls = dict()
for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
if ispkg:
continue
_zoo = import_module(f'torchvision.models.{name}')
if hasattr(_zoo, 'model_urls'):
_urls = getattr(_zoo, 'model_urls')
model_urls.update(_urls)
return model_urls
def load_fileclient_dist(filename, backend, map_location):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
allowed_backends = ['ceph']
if backend not in allowed_backends:
raise ValueError(f'Load from Backend {backend} is not supported.')
if rank == 0:
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
return checkpoint
def load_url_dist(url, model_dir=None):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
if rank == 0:
checkpoint = model_zoo.load_url(url, model_dir=model_dir)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
checkpoint = model_zoo.load_url(url, model_dir=model_dir)
return checkpoint
def _load_checkpoint(filename, map_location=None):
"""Load checkpoint from somewhere (modelzoo, file, url).
Args:
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str | None): Same as :func:`torch.load`. Default: None.
Returns:
dict | OrderedDict: The loaded checkpoint. It can be either an
OrderedDict storing model weights or a dict containing other
information, which depends on the checkpoint.
"""
if filename.startswith('modelzoo://'):
warnings.warn(
'The URL scheme of "modelzoo://" is deprecated, please use "torchvision://" instead'
)
model_urls = get_torchvision_models()
model_name = filename[11:]
checkpoint = load_url_dist(model_urls[model_name])
elif filename.startswith('torchvision://'):
model_urls = get_torchvision_models()
model_name = filename[14:]
checkpoint = load_url_dist(model_urls[model_name])
elif filename.startswith('open-mmlab://'):
model_urls = get_external_models()
model_name = filename[13:]
deprecated_urls = get_deprecated_model_names()
if model_name in deprecated_urls:
warnings.warn(
f'open-mmlab://{model_name} is deprecated in favor of open-mmlab://{deprecated_urls[model_name]}'
)
model_name = deprecated_urls[model_name]
model_url = model_urls[model_name]
if model_url.startswith(('http://', 'https://')):
checkpoint = load_url_dist(model_url)
else:
filename = osp.join(_get_mmcv_home(), model_url)
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
elif filename.startswith('mmcls://'):
model_urls = get_mmcls_models()
model_name = filename[8:]
checkpoint = load_url_dist(model_urls[model_name])
checkpoint = _process_mmcls_checkpoint(checkpoint)
elif filename.startswith(('http://', 'https://')):
checkpoint = load_url_dist(filename)
elif filename.startswith('pavi://'):
model_path = filename[7:]
checkpoint = load_pavimodel_dist(model_path, map_location=map_location)
elif filename.startswith('s3://'):
checkpoint = load_fileclient_dist(filename, backend='ceph',
map_location=map_location)
else:
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
return checkpoint
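# Aside: a hypothetical, self-contained demo of the plain local-file branch
# above (the helper name and the path are placeholders of ours, not mmcv API):
def _demo_local_checkpoint(path='/tmp/_demo_ckpt.pth'):
    torch.save({'state_dict': nn.Linear(4, 4).state_dict()}, path)
    # No URI scheme matches, so _load_checkpoint falls through to torch.load.
    return _load_checkpoint(path, map_location='cpu')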
def load_state_dict(module, state_dict, strict=False, logger=None):
"""Load state_dict to a module.
This method is modified from :meth:`torch.nn.Module.load_state_dict`.
    Default value for ``strict`` is set to ``False``, and the message for
    parameter mismatch is shown even when ``strict`` is ``False``.
Args:
module (Module): Module that receives the state_dict.
state_dict (OrderedDict): Weights.
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
logger (:obj:`logging.Logger`, optional): Logger to log the error
message. If not specified, print function will be used.
"""
unexpected_keys = []
all_missing_keys = []
err_msg = []
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
if is_module_wrapper(module):
module = module.module
        local_metadata = {} if metadata is None else metadata.get(
            prefix[:-1], {})
module._load_from_state_dict(state_dict, prefix, local_metadata,
True, all_missing_keys, unexpected_keys, err_msg)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(module)
    load = None  # break load->load reference cycle
missing_keys = [key for key in all_missing_keys if
'num_batches_tracked' not in key]
if unexpected_keys:
err_msg.append(
f"unexpected key in source state_dict: {', '.join(unexpected_keys)}\n"
)
if missing_keys:
err_msg.append(
f"missing keys in source state_dict: {', '.join(missing_keys)}\n")
rank, _ = get_dist_info()
if len(err_msg) > 0 and rank == 0:
err_msg.insert(0,
'The model and loaded state dict do not match exactly\n')
err_msg = '\n'.join(err_msg)
if strict:
raise RuntimeError(err_msg)
elif logger is not None:
logger.warning(err_msg)
        else:
            print(err_msg)
def load_checkpoint(model, filename, map_location='cpu', strict=False,
logger=None):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to raise an error when the parameters of the
            model and the checkpoint do not match exactly.
logger (:mod:`logging.Logger` or None): The logger for error message.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint = _load_checkpoint(filename, map_location)
if not isinstance(checkpoint, dict):
raise RuntimeError(f'No state_dict found in checkpoint file {filename}'
)
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
if list(state_dict.keys())[0].startswith('module.'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
if state_dict.get('absolute_pos_embed') is not None:
absolute_pos_embed = state_dict['absolute_pos_embed']
N1, L, C1 = absolute_pos_embed.size()
N2, C2, H, W = model.absolute_pos_embed.size()
if N1 != N2 or C1 != C2 or L != H * W:
logger.warning('Error in loading absolute_pos_embed, pass')
else:
state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2,
H, W, C2).permute(0, 3, 1, 2)
relative_position_bias_table_keys = [k for k in state_dict.keys() if
'relative_position_bias_table' in k]
for table_key in relative_position_bias_table_keys:
table_pretrained = state_dict[table_key]
table_current = model.state_dict()[table_key]
L1, nH1 = table_pretrained.size()
L2, nH2 = table_current.size()
if nH1 != nH2:
logger.warning(f'Error in loading {table_key}, pass')
elif L1 != L2:
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
table_pretrained_resized = F.interpolate(table_pretrained.
permute(1, 0).view(1, nH1, S1, S1), size=(S2, S2), mode=
'bicubic')
state_dict[table_key] = table_pretrained_resized.view(nH2, L2
).permute(1, 0)
load_state_dict(model, state_dict, strict, logger)
return checkpoint
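# Aside (illustrative; the helper name is ours): the Swin-specific branch above
# resizes a pretrained relative_position_bias_table of shape (L1, nH) to
# (L2, nH) by treating it as an S x S grid per head, with L = ((2*window - 1))^2,
# and bicubically interpolating it, e.g.:
def _demo_resize_bias_table(nH=4, S1=7, S2=14):
    table = torch.rand((2 * S1 - 1) ** 2, nH)  # pretrained table, L1 x nH
    S = int(table.shape[0] ** 0.5)  # recover the grid side, 2*S1 - 1
    resized = F.interpolate(table.permute(1, 0).view(1, nH, S, S),
        size=(2 * S2 - 1, 2 * S2 - 1), mode='bicubic')
    return resized.view(nH, (2 * S2 - 1) ** 2).permute(1, 0)  # L2 x nH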
class SRCNN(nn.Module):
"""SRCNN network structure for image super resolution.
SRCNN has three conv layers. For each layer, we can define the
`in_channels`, `out_channels` and `kernel_size`.
The input image will first be upsampled with a bicubic upsampler, and then
super-resolved in the HR spatial size.
Paper: Learning a Deep Convolutional Network for Image Super-Resolution.
Args:
        channels (tuple[int]): A tuple of channel numbers for each layer,
            including the channels of input and output. Default: (3, 64, 32, 3).
kernel_sizes (tuple[int]): A tuple of kernel sizes for each conv layer.
Default: (9, 1, 5).
upscale_factor (int): Upsampling factor. Default: 4.
"""
def __init__(self, channels=(3, 64, 32, 3), kernel_sizes=(9, 1, 5),
upscale_factor=4):
super().__init__()
        assert len(channels) == 4, (
            f'The length of channel tuple should be 4, but got {len(channels)}')
        assert len(kernel_sizes) == 3, (
            f'The length of kernel tuple should be 3, but got {len(kernel_sizes)}')
self.upscale_factor = upscale_factor
self.img_upsampler = nn.Upsample(scale_factor=self.upscale_factor,
mode='bicubic', align_corners=False)
self.conv1 = nn.Conv2d(channels[0], channels[1], kernel_size=
kernel_sizes[0], padding=kernel_sizes[0] // 2)
self.conv2 = nn.Conv2d(channels[1], channels[2], kernel_size=
kernel_sizes[1], padding=kernel_sizes[1] // 2)
self.conv3 = nn.Conv2d(channels[2], channels[3], kernel_size=
kernel_sizes[2], padding=kernel_sizes[2] // 2)
self.relu = nn.ReLU()
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
x = self.img_upsampler(x)
out = self.relu(self.conv1(x))
out = self.relu(self.conv2(out))
out = self.conv3(out)
return out
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether to strictly load the pretrained
                model. Defaults to True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is None:
pass
else:
raise TypeError(
f'"pretrained" must be a str or None. But received {type(pretrained)}.'
)
def get_inputs():
return [torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [[], {}]
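# A minimal usage sketch for the eager SRCNN module above; the helper name is
# illustrative and not part of the original file. With the default settings, a
# (n, 3, h, w) input is bicubically upsampled by 4x before the three convs, and
# the paddings keep each conv size-preserving.
def _srcnn_usage_example():
    model = SRCNN()
    lr = torch.rand(1, 3, 16, 16)
    sr = model(lr)
    assert sr.shape == (1, 3, 64, 64)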
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import logging
import torchvision
import warnings
from collections import OrderedDict
from torch.utils import model_zoo
from torch.nn import functional as F
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0(
in_out_ptr1, in_ptr0, xnumel, XBLOCK: tl.constexpr):
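    # Fused bicubic 4x upsample: maps the (4, 3, 4, 4) input to (4, 3, 16, 16).
    # Each output pixel gathers a clamped 4x4 source neighbourhood and weights
    # it with the cubic convolution (Keys) kernel, a = -0.75 as in PyTorch:
    #   W(t) = 1.25*|t|**3 - 2.25*|t|**2 + 1              for |t| <= 1
    #   W(t) = -0.75*|t|**3 + 3.75*|t|**2 - 6*|t| + 3     for 1 < |t| < 2
    # evaluated below in Horner form (hence the 1.25/2.25 and
    # -0.75/-3.75/-6.0/-3.0 constants).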
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x3 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = libdevice.floor(tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 - tmp9
tmp11 = tl.full([1], 0, tl.int64)
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp13 = tl.full([1], 3, tl.int64)
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tmp15 = x0
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp16 + tmp2
tmp18 = tmp17 * tmp4
tmp19 = tmp18 - tmp2
tmp20 = libdevice.floor(tmp19)
tmp21 = tmp20.to(tl.int32)
tmp22 = tmp21 - tmp9
tmp23 = triton_helpers.maximum(tmp22, tmp11)
tmp24 = triton_helpers.minimum(tmp23, tmp13)
tmp25 = tl.load(in_ptr0 + (tmp24 + 4 * tmp14 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp26 = tmp19 - tmp20
tmp27 = 0.0
tmp28 = triton_helpers.maximum(tmp26, tmp27)
tmp29 = 1.0
tmp30 = triton_helpers.minimum(tmp28, tmp29)
tmp31 = tmp30 + tmp29
tmp32 = -0.75
tmp33 = tmp31 * tmp32
tmp34 = -3.75
tmp35 = tmp33 - tmp34
tmp36 = tmp35 * tmp31
tmp37 = -6.0
tmp38 = tmp36 + tmp37
tmp39 = tmp38 * tmp31
tmp40 = -3.0
tmp41 = tmp39 - tmp40
tmp42 = tmp25 * tmp41
tmp43 = triton_helpers.maximum(tmp21, tmp11)
tmp44 = triton_helpers.minimum(tmp43, tmp13)
tmp45 = tl.load(in_ptr0 + (tmp44 + 4 * tmp14 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp46 = 1.25
tmp47 = tmp30 * tmp46
tmp48 = 2.25
tmp49 = tmp47 - tmp48
tmp50 = tmp49 * tmp30
tmp51 = tmp50 * tmp30
tmp52 = tmp51 + tmp29
tmp53 = tmp45 * tmp52
tmp54 = tmp21 + tmp9
tmp55 = triton_helpers.maximum(tmp54, tmp11)
tmp56 = triton_helpers.minimum(tmp55, tmp13)
tmp57 = tl.load(in_ptr0 + (tmp56 + 4 * tmp14 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp58 = tmp29 - tmp30
tmp59 = tmp58 * tmp46
tmp60 = tmp59 - tmp48
tmp61 = tmp60 * tmp58
tmp62 = tmp61 * tmp58
tmp63 = tmp62 + tmp29
tmp64 = tmp57 * tmp63
tmp65 = triton_helpers.maximum(tmp8, tmp11)
tmp66 = triton_helpers.minimum(tmp65, tmp13)
tmp67 = tl.load(in_ptr0 + (tmp24 + 4 * tmp66 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp68 = tmp67 * tmp41
tmp69 = tl.full([1], 2, tl.int64)
tmp70 = tmp21 + tmp69
tmp71 = triton_helpers.maximum(tmp70, tmp11)
tmp72 = triton_helpers.minimum(tmp71, tmp13)
tmp73 = tl.load(in_ptr0 + (tmp72 + 4 * tmp14 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp74 = 2.0
tmp75 = tmp74 - tmp30
tmp76 = tmp75 * tmp32
tmp77 = tmp76 - tmp34
tmp78 = tmp77 * tmp75
tmp79 = tmp78 + tmp37
tmp80 = tmp79 * tmp75
tmp81 = tmp80 - tmp40
tmp82 = tmp73 * tmp81
tmp83 = tl.load(in_ptr0 + (tmp44 + 4 * tmp66 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp84 = tmp83 * tmp52
tmp85 = tl.load(in_ptr0 + (tmp56 + 4 * tmp66 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp86 = tmp85 * tmp63
tmp87 = tmp8 + tmp9
tmp88 = triton_helpers.maximum(tmp87, tmp11)
tmp89 = triton_helpers.minimum(tmp88, tmp13)
tmp90 = tl.load(in_ptr0 + (tmp24 + 4 * tmp89 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp91 = tmp90 * tmp41
tmp92 = tl.load(in_ptr0 + (tmp72 + 4 * tmp66 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp93 = tmp92 * tmp81
tmp94 = tl.load(in_ptr0 + (tmp44 + 4 * tmp89 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp95 = tmp94 * tmp52
tmp96 = tl.load(in_ptr0 + (tmp56 + 4 * tmp89 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp97 = tmp96 * tmp63
tmp98 = tmp8 + tmp69
tmp99 = triton_helpers.maximum(tmp98, tmp11)
tmp100 = triton_helpers.minimum(tmp99, tmp13)
tmp101 = tl.load(in_ptr0 + (tmp24 + 4 * tmp100 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp102 = tmp101 * tmp41
tmp103 = tl.load(in_ptr0 + (tmp72 + 4 * tmp89 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp104 = tmp103 * tmp81
tmp105 = tl.load(in_ptr0 + (tmp44 + 4 * tmp100 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp106 = tmp105 * tmp52
tmp107 = tl.load(in_ptr0 + (tmp56 + 4 * tmp100 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp108 = tmp107 * tmp63
tmp109 = tl.load(in_ptr0 + (tmp72 + 4 * tmp100 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp110 = tmp109 * tmp81
tmp111 = tmp42 + tmp53
tmp112 = tmp111 + tmp64
tmp113 = tmp112 + tmp82
tmp114 = tmp6 - tmp7
tmp115 = triton_helpers.maximum(tmp114, tmp27)
tmp116 = triton_helpers.minimum(tmp115, tmp29)
tmp117 = tmp116 + tmp29
tmp118 = tmp117 * tmp32
tmp119 = tmp118 - tmp34
tmp120 = tmp119 * tmp117
tmp121 = tmp120 + tmp37
tmp122 = tmp121 * tmp117
tmp123 = tmp122 - tmp40
tmp124 = tmp113 * tmp123
tmp125 = tmp68 + tmp84
tmp126 = tmp125 + tmp86
tmp127 = tmp126 + tmp93
tmp128 = tmp116 * tmp46
tmp129 = tmp128 - tmp48
tmp130 = tmp129 * tmp116
tmp131 = tmp130 * tmp116
tmp132 = tmp131 + tmp29
tmp133 = tmp127 * tmp132
tmp134 = tmp124 + tmp133
tmp135 = tmp91 + tmp95
tmp136 = tmp135 + tmp97
tmp137 = tmp136 + tmp104
tmp138 = tmp29 - tmp116
tmp139 = tmp138 * tmp46
tmp140 = tmp139 - tmp48
tmp141 = tmp140 * tmp138
tmp142 = tmp141 * tmp138
tmp143 = tmp142 + tmp29
tmp144 = tmp137 * tmp143
tmp145 = tmp134 + tmp144
tmp146 = tmp102 + tmp106
tmp147 = tmp146 + tmp108
tmp148 = tmp147 + tmp110
tmp149 = tmp74 - tmp116
tmp150 = tmp149 * tmp32
tmp151 = tmp150 - tmp34
tmp152 = tmp151 * tmp149
tmp153 = tmp152 + tmp37
tmp154 = tmp153 * tmp149
tmp155 = tmp154 - tmp40
tmp156 = tmp148 * tmp155
tmp157 = tmp145 + tmp156
tl.store(in_out_ptr1 + x3, tmp157, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
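    # In-place epilogue for conv1: add the per-channel bias from in_ptr0
    # (x1 indexes the 64 output channels) and apply ReLU via max(0, x).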
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
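    # Same fused bias-add + ReLU epilogue as above, for conv2's 32 channels.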
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
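    # Bias-add epilogue for the final conv3 (3 output channels, no activation).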
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 256 % 3
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (64, 3, 9, 9), (243, 81, 9, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (3, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (3,), (1,))
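    # Pipeline: Triton bicubic 4x upsample -> conv1 (9x9) + fused bias/ReLU ->
    # conv2 (1x1) + fused bias/ReLU -> conv3 (5x5) + fused bias add; the
    # convolutions themselves dispatch through extern_kernels.convolution
    # (ATen, typically cuDNN), with epilogues handled by the kernels above.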
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf10 = empty_strided_cuda((4, 3, 16, 16), (768, 256, 16, 1), torch
.float32)
buf18 = buf10
del buf10
buf20 = buf18
del buf18
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0[
grid(3072)](buf20, primals_1, 3072, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_1
buf21 = extern_kernels.convolution(buf20, primals_2, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 64, 16, 16), (16384, 256, 16, 1))
buf22 = buf21
del buf21
triton_poi_fused_convolution_relu_1[grid(65536)](buf22, primals_3,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_3
buf23 = extern_kernels.convolution(buf22, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 32, 16, 16), (8192, 256, 16, 1))
buf24 = buf23
del buf23
triton_poi_fused_convolution_relu_2[grid(32768)](buf24, primals_5,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf25 = extern_kernels.convolution(buf24, primals_6, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 3, 16, 16), (768, 256, 16, 1))
buf26 = buf25
del buf25
triton_poi_fused_convolution_3[grid(3072)](buf26, primals_7, 3072,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf26, primals_2, primals_4, primals_6, buf20, buf22, buf24
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
e.g., "mmedit".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = get_logger(__name__.split('.')[0], log_file, log_level)
return logger
def _get_mmcv_home():
mmcv_home = os.path.expanduser(os.getenv(ENV_MMCV_HOME, os.path.join(os
.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
mkdir_or_exist(mmcv_home)
return mmcv_home
def _process_mmcls_checkpoint(checkpoint):
state_dict = checkpoint['state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k.startswith('backbone.'):
new_state_dict[k[9:]] = v
new_checkpoint = dict(state_dict=new_state_dict)
return new_checkpoint
def get_deprecated_model_names():
deprecate_json_path = osp.join(mmcv.__path__[0],
'model_zoo/deprecated.json')
deprecate_urls = load_file(deprecate_json_path)
assert isinstance(deprecate_urls, dict)
return deprecate_urls
def get_external_models():
mmcv_home = _get_mmcv_home()
default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
default_urls = load_file(default_json_path)
assert isinstance(default_urls, dict)
external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
if osp.exists(external_json_path):
external_urls = load_file(external_json_path)
assert isinstance(external_urls, dict)
default_urls.update(external_urls)
return default_urls
def get_mmcls_models():
mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
mmcls_urls = load_file(mmcls_json_path)
return mmcls_urls
def get_torchvision_models():
model_urls = dict()
for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
if ispkg:
continue
_zoo = import_module(f'torchvision.models.{name}')
if hasattr(_zoo, 'model_urls'):
_urls = getattr(_zoo, 'model_urls')
model_urls.update(_urls)
return model_urls
def load_fileclient_dist(filename, backend, map_location):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
allowed_backends = ['ceph']
if backend not in allowed_backends:
raise ValueError(f'Load from Backend {backend} is not supported.')
if rank == 0:
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
return checkpoint
def load_url_dist(url, model_dir=None):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
if rank == 0:
checkpoint = model_zoo.load_url(url, model_dir=model_dir)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
checkpoint = model_zoo.load_url(url, model_dir=model_dir)
return checkpoint
def _load_checkpoint(filename, map_location=None):
"""Load checkpoint from somewhere (modelzoo, file, url).
Args:
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str | None): Same as :func:`torch.load`. Default: None.
Returns:
dict | OrderedDict: The loaded checkpoint. It can be either an
OrderedDict storing model weights or a dict containing other
information, which depends on the checkpoint.
"""
if filename.startswith('modelzoo://'):
warnings.warn(
'The URL scheme of "modelzoo://" is deprecated, please use "torchvision://" instead'
)
model_urls = get_torchvision_models()
model_name = filename[11:]
checkpoint = load_url_dist(model_urls[model_name])
elif filename.startswith('torchvision://'):
model_urls = get_torchvision_models()
model_name = filename[14:]
checkpoint = load_url_dist(model_urls[model_name])
elif filename.startswith('open-mmlab://'):
model_urls = get_external_models()
model_name = filename[13:]
deprecated_urls = get_deprecated_model_names()
if model_name in deprecated_urls:
warnings.warn(
f'open-mmlab://{model_name} is deprecated in favor of open-mmlab://{deprecated_urls[model_name]}'
)
model_name = deprecated_urls[model_name]
model_url = model_urls[model_name]
if model_url.startswith(('http://', 'https://')):
checkpoint = load_url_dist(model_url)
else:
filename = osp.join(_get_mmcv_home(), model_url)
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
elif filename.startswith('mmcls://'):
model_urls = get_mmcls_models()
model_name = filename[8:]
checkpoint = load_url_dist(model_urls[model_name])
checkpoint = _process_mmcls_checkpoint(checkpoint)
elif filename.startswith(('http://', 'https://')):
checkpoint = load_url_dist(filename)
elif filename.startswith('pavi://'):
model_path = filename[7:]
checkpoint = load_pavimodel_dist(model_path, map_location=map_location)
elif filename.startswith('s3://'):
checkpoint = load_fileclient_dist(filename, backend='ceph',
map_location=map_location)
else:
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
return checkpoint
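# Example URI forms accepted by _load_checkpoint (paths and model names are
# placeholders for illustration):
#   _load_checkpoint('torchvision://resnet18')      # torchvision model zoo
#   _load_checkpoint('open-mmlab://model_name')     # open-mmlab model zoo
#   _load_checkpoint('https://host/ckpt.pth')       # direct URL
#   _load_checkpoint('/path/to/ckpt.pth', 'cpu')    # local file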
def load_state_dict(module, state_dict, strict=False, logger=None):
"""Load state_dict to a module.
This method is modified from :meth:`torch.nn.Module.load_state_dict`.
Default value for ``strict`` is set to ``False`` and the message for
param mismatch will be shown even if strict is False.
Args:
module (Module): Module that receives the state_dict.
state_dict (OrderedDict): Weights.
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
logger (:obj:`logging.Logger`, optional): Logger to log the error
message. If not specified, print function will be used.
"""
unexpected_keys = []
all_missing_keys = []
err_msg = []
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
if is_module_wrapper(module):
module = module.module
local_metadata = {} if metadata is None else metadata.get(prefix[:-
1], {})
module._load_from_state_dict(state_dict, prefix, local_metadata,
True, all_missing_keys, unexpected_keys, err_msg)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(module)
load = None
missing_keys = [key for key in all_missing_keys if
'num_batches_tracked' not in key]
if unexpected_keys:
err_msg.append(
f"unexpected key in source state_dict: {', '.join(unexpected_keys)}\n"
)
if missing_keys:
err_msg.append(
f"missing keys in source state_dict: {', '.join(missing_keys)}\n")
rank, _ = get_dist_info()
if len(err_msg) > 0 and rank == 0:
err_msg.insert(0,
'The model and loaded state dict do not match exactly\n')
err_msg = '\n'.join(err_msg)
if strict:
raise RuntimeError(err_msg)
elif logger is not None:
logger.warning(err_msg)
else:
            print(err_msg)
def load_checkpoint(model, filename, map_location='cpu', strict=False,
logger=None):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str): Same as :func:`torch.load`.
strict (bool): Whether to allow different params for the model and
checkpoint.
logger (:mod:`logging.Logger` or None): The logger for error message.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint = _load_checkpoint(filename, map_location)
if not isinstance(checkpoint, dict):
raise RuntimeError(f'No state_dict found in checkpoint file {filename}'
)
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
if list(state_dict.keys())[0].startswith('module.'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
if state_dict.get('absolute_pos_embed') is not None:
absolute_pos_embed = state_dict['absolute_pos_embed']
N1, L, C1 = absolute_pos_embed.size()
N2, C2, H, W = model.absolute_pos_embed.size()
if N1 != N2 or C1 != C2 or L != H * W:
logger.warning('Error in loading absolute_pos_embed, pass')
else:
state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2,
H, W, C2).permute(0, 3, 1, 2)
relative_position_bias_table_keys = [k for k in state_dict.keys() if
'relative_position_bias_table' in k]
for table_key in relative_position_bias_table_keys:
table_pretrained = state_dict[table_key]
table_current = model.state_dict()[table_key]
L1, nH1 = table_pretrained.size()
L2, nH2 = table_current.size()
if nH1 != nH2:
logger.warning(f'Error in loading {table_key}, pass')
elif L1 != L2:
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
table_pretrained_resized = F.interpolate(table_pretrained.
permute(1, 0).view(1, nH1, S1, S1), size=(S2, S2), mode=
'bicubic')
state_dict[table_key] = table_pretrained_resized.view(nH2, L2
).permute(1, 0)
load_state_dict(model, state_dict, strict, logger)
return checkpoint
class SRCNNNew(nn.Module):
"""SRCNN network structure for image super resolution.
SRCNN has three conv layers. For each layer, we can define the
`in_channels`, `out_channels` and `kernel_size`.
The input image will first be upsampled with a bicubic upsampler, and then
super-resolved in the HR spatial size.
Paper: Learning a Deep Convolutional Network for Image Super-Resolution.
Args:
        channels (tuple[int]): A tuple of channel numbers for each layer,
            including the input and output channels. Default: (3, 64, 32, 3).
kernel_sizes (tuple[int]): A tuple of kernel sizes for each conv layer.
Default: (9, 1, 5).
upscale_factor (int): Upsampling factor. Default: 4.
"""
def __init__(self, channels=(3, 64, 32, 3), kernel_sizes=(9, 1, 5),
upscale_factor=4):
super().__init__()
assert len(channels
) == 4, f'The length of channel tuple should be 4, but got {len(channels)}'
assert len(kernel_sizes
) == 3, f'The length of kernel tuple should be 3, but got {len(kernel_sizes)}'
self.upscale_factor = upscale_factor
self.img_upsampler = nn.Upsample(scale_factor=self.upscale_factor,
mode='bicubic', align_corners=False)
self.conv1 = nn.Conv2d(channels[0], channels[1], kernel_size=
kernel_sizes[0], padding=kernel_sizes[0] // 2)
self.conv2 = nn.Conv2d(channels[1], channels[2], kernel_size=
kernel_sizes[1], padding=kernel_sizes[1] // 2)
self.conv3 = nn.Conv2d(channels[2], channels[3], kernel_size=
kernel_sizes[2], padding=kernel_sizes[2] // 2)
self.relu = nn.ReLU()
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether to strictly load the pretrained
                model. Defaults to True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is None:
pass
else:
raise TypeError(
f'"pretrained" must be a str or None. But received {type(pretrained)}.'
)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| Jason-Khan/mmediting | SRCNN | false | 1,025 | [
"Apache-2.0"
] | 0 | d187f95a675dff3eb975a575bd9278d643b5b645 | https://github.com/Jason-Khan/mmediting/tree/d187f95a675dff3eb975a575bd9278d643b5b645 | import logging
import torch
import torchvision
import warnings
from collections import OrderedDict
from torch.utils import model_zoo
from torch.nn import functional as F
import torch.nn as nn
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
e.g., "mmedit".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = get_logger(__name__.split('.')[0], log_file, log_level)
return logger
def _get_mmcv_home():
mmcv_home = os.path.expanduser(os.getenv(ENV_MMCV_HOME, os.path.join(os
.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
mkdir_or_exist(mmcv_home)
return mmcv_home
def _process_mmcls_checkpoint(checkpoint):
state_dict = checkpoint['state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k.startswith('backbone.'):
new_state_dict[k[9:]] = v
new_checkpoint = dict(state_dict=new_state_dict)
return new_checkpoint
def get_deprecated_model_names():
deprecate_json_path = osp.join(mmcv.__path__[0],
'model_zoo/deprecated.json')
deprecate_urls = load_file(deprecate_json_path)
assert isinstance(deprecate_urls, dict)
return deprecate_urls
def get_external_models():
mmcv_home = _get_mmcv_home()
default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
default_urls = load_file(default_json_path)
assert isinstance(default_urls, dict)
external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
if osp.exists(external_json_path):
external_urls = load_file(external_json_path)
assert isinstance(external_urls, dict)
default_urls.update(external_urls)
return default_urls
def get_mmcls_models():
mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
mmcls_urls = load_file(mmcls_json_path)
return mmcls_urls
def get_torchvision_models():
model_urls = dict()
for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
if ispkg:
continue
_zoo = import_module(f'torchvision.models.{name}')
if hasattr(_zoo, 'model_urls'):
_urls = getattr(_zoo, 'model_urls')
model_urls.update(_urls)
return model_urls
def load_fileclient_dist(filename, backend, map_location):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
allowed_backends = ['ceph']
if backend not in allowed_backends:
raise ValueError(f'Load from Backend {backend} is not supported.')
if rank == 0:
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
return checkpoint
def load_url_dist(url, model_dir=None):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
if rank == 0:
checkpoint = model_zoo.load_
# ... truncated (>4000 chars) for memory efficiency |
PairCosineSim | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/io/ciob4kb36gg4wx77qlmocg7jy3y7sntoa4fvgm3oato4xhh5cpe4.py
# Topologically Sorted Source Nodes: [similarities], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# similarities => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %mul_1, %mul_2, %mul_3],), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (4*x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp6 * tmp6
tmp8 = tl.load(in_ptr1 + (1 + (4*x0)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = tl.load(in_ptr1 + (2 + (4*x0)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + (4*x0)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = 1e-10
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp19 = float("inf")
tmp20 = triton_helpers.minimum(tmp18, tmp19)
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp5 * tmp21
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp4, tmp22, tmp23)
tmp25 = tmp0 >= tmp3
tmp26 = tl.full([1], 8, tl.int64)
tmp27 = tmp0 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tl.load(in_ptr2 + ((-4) + x0), tmp28 & xmask, eviction_policy='evict_last', other=0.0)
tmp30 = tl.load(in_ptr1 + (16 + (4*((-4) + x0))), tmp28 & xmask, eviction_policy='evict_last', other=0.0)
tmp31 = tmp30 * tmp30
tmp32 = tl.load(in_ptr1 + (17 + (4*((-4) + x0))), tmp28 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tmp32 * tmp32
tmp34 = tmp31 + tmp33
tmp35 = tl.load(in_ptr1 + (18 + (4*((-4) + x0))), tmp28 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tmp35 * tmp35
tmp37 = tmp34 + tmp36
tmp38 = tl.load(in_ptr1 + (19 + (4*((-4) + x0))), tmp28 & xmask, eviction_policy='evict_last', other=0.0)
tmp39 = tmp38 * tmp38
tmp40 = tmp37 + tmp39
tmp41 = triton_helpers.maximum(tmp40, tmp17)
tmp42 = triton_helpers.minimum(tmp41, tmp19)
tmp43 = libdevice.rsqrt(tmp42)
tmp44 = tmp29 * tmp43
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp28, tmp44, tmp45)
tmp47 = tmp0 >= tmp26
tmp48 = tl.full([1], 12, tl.int64)
tmp49 = tmp0 < tmp48
tmp50 = tmp47 & tmp49
tmp51 = tl.load(in_ptr3 + ((-8) + x0), tmp50 & xmask, eviction_policy='evict_last', other=0.0)
tmp52 = tl.load(in_ptr1 + (32 + (4*((-8) + x0))), tmp50 & xmask, eviction_policy='evict_last', other=0.0)
tmp53 = tmp52 * tmp52
tmp54 = tl.load(in_ptr1 + (33 + (4*((-8) + x0))), tmp50 & xmask, eviction_policy='evict_last', other=0.0)
tmp55 = tmp54 * tmp54
tmp56 = tmp53 + tmp55
tmp57 = tl.load(in_ptr1 + (34 + (4*((-8) + x0))), tmp50 & xmask, eviction_policy='evict_last', other=0.0)
tmp58 = tmp57 * tmp57
tmp59 = tmp56 + tmp58
tmp60 = tl.load(in_ptr1 + (35 + (4*((-8) + x0))), tmp50 & xmask, eviction_policy='evict_last', other=0.0)
tmp61 = tmp60 * tmp60
tmp62 = tmp59 + tmp61
tmp63 = triton_helpers.maximum(tmp62, tmp17)
tmp64 = triton_helpers.minimum(tmp63, tmp19)
tmp65 = libdevice.rsqrt(tmp64)
tmp66 = tmp51 * tmp65
tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype)
tmp68 = tl.where(tmp50, tmp66, tmp67)
tmp69 = tmp0 >= tmp48
tmp70 = tl.full([1], 16, tl.int64)
tmp71 = tmp0 < tmp70
tmp72 = tl.load(in_ptr4 + ((-12) + x0), tmp69 & xmask, eviction_policy='evict_last', other=0.0)
tmp73 = tl.load(in_ptr1 + (48 + (4*((-12) + x0))), tmp69 & xmask, eviction_policy='evict_last', other=0.0)
tmp74 = tmp73 * tmp73
tmp75 = tl.load(in_ptr1 + (49 + (4*((-12) + x0))), tmp69 & xmask, eviction_policy='evict_last', other=0.0)
tmp76 = tmp75 * tmp75
tmp77 = tmp74 + tmp76
tmp78 = tl.load(in_ptr1 + (50 + (4*((-12) + x0))), tmp69 & xmask, eviction_policy='evict_last', other=0.0)
tmp79 = tmp78 * tmp78
tmp80 = tmp77 + tmp79
tmp81 = tl.load(in_ptr1 + (51 + (4*((-12) + x0))), tmp69 & xmask, eviction_policy='evict_last', other=0.0)
tmp82 = tmp81 * tmp81
tmp83 = tmp80 + tmp82
tmp84 = triton_helpers.maximum(tmp83, tmp17)
tmp85 = triton_helpers.minimum(tmp84, tmp19)
tmp86 = libdevice.rsqrt(tmp85)
tmp87 = tmp72 * tmp86
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp69, tmp87, tmp88)
tmp90 = tl.where(tmp50, tmp68, tmp89)
tmp91 = tl.where(tmp28, tmp46, tmp90)
tmp92 = tl.where(tmp4, tmp24, tmp91)
tl.store(out_ptr0 + (x0), tmp92, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_product], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 0), out=buf0)
buf1 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_product_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 16), out=buf1)
buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_product_4], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 32), out=buf2)
buf3 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_product_6], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 48), out=buf3)
del arg1_1
buf4 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [similarities], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(buf0, arg0_1, buf1, buf2, buf3, buf4, 16, grid=grid(16), stream=stream0)
del arg0_1
del buf0
del buf1
del buf2
del buf3
return (reinterpret_tensor(buf4, (4, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn as nn
class PairCosineSim(nn.Module):
def __init__(self):
super(PairCosineSim, self).__init__()
def forward(self, supports, target):
"""
        Calculates the pairwise cosine similarity of the support set samples
        with the target sample.
        :param supports: The embeddings of the support set samples, tensor of shape [support_size, batch_size, input_size]
        :param target: The embedding of the target sample, tensor of shape [batch_size, input_size]
        :return: Tensor with cosine similarities of shape [support_size, batch_size]
"""
eps = 1e-10
similarities = []
for support_image in supports:
sum_support = torch.sum(torch.pow(support_image, 2), 1)
support_magnitude = sum_support.clamp(eps, float('inf')).rsqrt()
target_unsqueeze = target.unsqueeze(1)
support_image_unsqueeze = support_image.unsqueeze(2)
dot_product = target_unsqueeze.bmm(support_image_unsqueeze)
dot_product = dot_product.squeeze()
cos_sim = dot_product * support_magnitude
similarities.append(cos_sim)
similarities = torch.stack(similarities)
return similarities
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
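# A small worked check of PairCosineSim (sketch; the helper name is
# illustrative). Note the module normalizes by the support magnitude only,
# not the target magnitude, i.e. per support row i:
#   sim_i = <target, support_i> / max(||support_i||, sqrt(eps))
def _pair_cosine_sim_example():
    m = PairCosineSim()
    supports = torch.rand(4, 4, 4)  # [support_size, batch_size, input_size]
    target = torch.rand(4, 4)       # [batch_size, input_size]
    sims = m(supports, target)
    assert sims.shape == (4, 4)     # [support_size, batch_size]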
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
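    # Fuses the per-support normalization with the final stack. in_ptr0/2/3/4
    # hold the four bmm dot products; in_ptr1 holds the raw supports. Each
    # branch recomputes rsqrt(clamp(sum(support**2), 1e-10, inf)) for its
    # support row and scales the matching dot product; the index ranges
    # [0, 4), [4, 8), [8, 12), [12, 16) pick which stacked output a lane writes.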
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.load(in_ptr1 + 4 * x0, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp6 * tmp6
tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = tl.load(in_ptr1 + (2 + 4 * x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tl.load(in_ptr1 + (3 + 4 * x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = 1e-10
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp19 = float('inf')
tmp20 = triton_helpers.minimum(tmp18, tmp19)
tmp21 = libdevice.rsqrt(tmp20)
tmp22 = tmp5 * tmp21
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp4, tmp22, tmp23)
tmp25 = tmp0 >= tmp3
tmp26 = tl.full([1], 8, tl.int64)
tmp27 = tmp0 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tl.load(in_ptr2 + (-4 + x0), tmp28 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp30 = tl.load(in_ptr1 + (16 + 4 * (-4 + x0)), tmp28 & xmask,
eviction_policy='evict_last', other=0.0)
tmp31 = tmp30 * tmp30
tmp32 = tl.load(in_ptr1 + (17 + 4 * (-4 + x0)), tmp28 & xmask,
eviction_policy='evict_last', other=0.0)
tmp33 = tmp32 * tmp32
tmp34 = tmp31 + tmp33
tmp35 = tl.load(in_ptr1 + (18 + 4 * (-4 + x0)), tmp28 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tmp35 * tmp35
tmp37 = tmp34 + tmp36
tmp38 = tl.load(in_ptr1 + (19 + 4 * (-4 + x0)), tmp28 & xmask,
eviction_policy='evict_last', other=0.0)
tmp39 = tmp38 * tmp38
tmp40 = tmp37 + tmp39
tmp41 = triton_helpers.maximum(tmp40, tmp17)
tmp42 = triton_helpers.minimum(tmp41, tmp19)
tmp43 = libdevice.rsqrt(tmp42)
tmp44 = tmp29 * tmp43
tmp45 = tl.full(tmp44.shape, 0.0, tmp44.dtype)
tmp46 = tl.where(tmp28, tmp44, tmp45)
tmp47 = tmp0 >= tmp26
tmp48 = tl.full([1], 12, tl.int64)
tmp49 = tmp0 < tmp48
tmp50 = tmp47 & tmp49
tmp51 = tl.load(in_ptr3 + (-8 + x0), tmp50 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp52 = tl.load(in_ptr1 + (32 + 4 * (-8 + x0)), tmp50 & xmask,
eviction_policy='evict_last', other=0.0)
tmp53 = tmp52 * tmp52
tmp54 = tl.load(in_ptr1 + (33 + 4 * (-8 + x0)), tmp50 & xmask,
eviction_policy='evict_last', other=0.0)
tmp55 = tmp54 * tmp54
tmp56 = tmp53 + tmp55
tmp57 = tl.load(in_ptr1 + (34 + 4 * (-8 + x0)), tmp50 & xmask,
eviction_policy='evict_last', other=0.0)
tmp58 = tmp57 * tmp57
tmp59 = tmp56 + tmp58
tmp60 = tl.load(in_ptr1 + (35 + 4 * (-8 + x0)), tmp50 & xmask,
eviction_policy='evict_last', other=0.0)
tmp61 = tmp60 * tmp60
tmp62 = tmp59 + tmp61
tmp63 = triton_helpers.maximum(tmp62, tmp17)
tmp64 = triton_helpers.minimum(tmp63, tmp19)
tmp65 = libdevice.rsqrt(tmp64)
tmp66 = tmp51 * tmp65
tmp67 = tl.full(tmp66.shape, 0.0, tmp66.dtype)
tmp68 = tl.where(tmp50, tmp66, tmp67)
tmp69 = tmp0 >= tmp48
tl.full([1], 16, tl.int64)
tmp72 = tl.load(in_ptr4 + (-12 + x0), tmp69 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp73 = tl.load(in_ptr1 + (48 + 4 * (-12 + x0)), tmp69 & xmask,
eviction_policy='evict_last', other=0.0)
tmp74 = tmp73 * tmp73
tmp75 = tl.load(in_ptr1 + (49 + 4 * (-12 + x0)), tmp69 & xmask,
eviction_policy='evict_last', other=0.0)
tmp76 = tmp75 * tmp75
tmp77 = tmp74 + tmp76
tmp78 = tl.load(in_ptr1 + (50 + 4 * (-12 + x0)), tmp69 & xmask,
eviction_policy='evict_last', other=0.0)
tmp79 = tmp78 * tmp78
tmp80 = tmp77 + tmp79
tmp81 = tl.load(in_ptr1 + (51 + 4 * (-12 + x0)), tmp69 & xmask,
eviction_policy='evict_last', other=0.0)
tmp82 = tmp81 * tmp81
tmp83 = tmp80 + tmp82
tmp84 = triton_helpers.maximum(tmp83, tmp17)
tmp85 = triton_helpers.minimum(tmp84, tmp19)
tmp86 = libdevice.rsqrt(tmp85)
tmp87 = tmp72 * tmp86
tmp88 = tl.full(tmp87.shape, 0.0, tmp87.dtype)
tmp89 = tl.where(tmp69, tmp87, tmp88)
tmp90 = tl.where(tmp50, tmp68, tmp89)
tmp91 = tl.where(tmp28, tmp46, tmp90)
tmp92 = tl.where(tmp4, tmp24, tmp91)
tl.store(out_ptr0 + x0, tmp92, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
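    # Each bmm computes the (4, 1, 1) batch of dot products <target, support_i>
    # for one support row (offsets 0/16/32/48 into arg0_1); normalization and
    # stacking are fused into the Triton kernel below.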
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 1, 4), (4, 4, 1),
0), reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 0), out=buf0)
buf1 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 1, 4), (4, 4, 1),
0), reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 16), out=buf1)
buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 1, 4), (4, 4, 1),
0), reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 32), out=buf2)
buf3 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (4, 1, 4), (4, 4, 1),
0), reinterpret_tensor(arg0_1, (4, 4, 1), (4, 1, 1), 48), out=buf3)
del arg1_1
buf4 = empty_strided_cuda((16,), (1,), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(16)](buf0, arg0_1, buf1, buf2, buf3,
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del buf0
del buf1
del buf2
del buf3
return reinterpret_tensor(buf4, (4, 4), (4, 1), 0),
class PairCosineSimNew(nn.Module):
def __init__(self):
super(PairCosineSimNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| SamujjwalSam/MatchingNetworks4XC | PairCosineSim | false | 1,026 | [
"MIT"
] | 0 | 2519cc1a527ea121c4966c1a860d890d5182f887 | https://github.com/SamujjwalSam/MatchingNetworks4XC/tree/2519cc1a527ea121c4966c1a860d890d5182f887 | import torch
import torch.utils.data
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, supports, target):
"""
        Calculates the pairwise cosine similarity of the support set samples
        with the target sample.
        :param supports: The embeddings of the support set samples, tensor of shape [support_size, batch_size, input_size]
        :param target: The embedding of the target sample, tensor of shape [batch_size, input_size]
        :return: Tensor with cosine similarities of shape [support_size, batch_size]
"""
eps = 1e-10
similarities = []
for support_image in supports:
sum_support = torch.sum(torch.pow(support_image, 2), 1)
support_magnitude = sum_support.clamp(eps, float('inf')).rsqrt()
target_unsqueeze = target.unsqueeze(1)
support_image_unsqueeze = support_image.unsqueeze(2)
dot_product = target_unsqueeze.bmm(support_image_unsqueeze)
dot_product = dot_product.squeeze()
cos_sim = dot_product * support_magnitude
similarities.append(cos_sim)
similarities = torch.stack(similarities)
return similarities
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
squeeze | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/3u/c3ub52l73zdv4klgqzgxmtzrzxvztuyczv2jksnvrjr7erq7guxd.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%getitem,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 64, 1, 1), (64, 1, 64, 64), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class squeeze(nn.Module):
def __init__(self, block_size):
super(squeeze, self).__init__()
self.block_size = block_size
self.block_size_sq = block_size * block_size
def inverse(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, d_height, d_width, d_depth = output.size()
s_depth = int(d_depth / self.block_size_sq)
s_width = int(d_width * self.block_size)
s_height = int(d_height * self.block_size)
t_1 = output.contiguous().view(batch_size, d_height, d_width, self.
block_size_sq, s_depth)
spl = t_1.split(self.block_size, 3)
stack = [t_t.contiguous().view(batch_size, d_height, s_width,
s_depth) for t_t in spl]
output = torch.stack(stack, 0).transpose(0, 1).permute(0, 2, 1, 3, 4
).contiguous().view(batch_size, s_height, s_width, s_depth)
output = output.permute(0, 3, 1, 2)
return output.contiguous()
def forward(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, s_height, _s_width, s_depth = output.size()
d_depth = s_depth * self.block_size_sq
d_height = int(s_height / self.block_size)
t_1 = output.split(self.block_size, 2)
stack = [t_t.contiguous().view(batch_size, d_height, d_depth) for
t_t in t_1]
output = torch.stack(stack, 1)
output = output.permute(0, 2, 1, 3)
output = output.permute(0, 3, 1, 2)
return output.contiguous()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'block_size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
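    # Materializes input.permute(0, 2, 3, 1) contiguously: for batch y1 and
    # spatial position y0, the 4 channel values (x2) are gathered from the
    # NCHW layout and stored channel-last, so the result can be reinterpreted
    # as (4, 64, 1, 1) with no further data movement.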
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 64, 1, 1), (64, 1, 64, 64), 0),
class squeezeNew(nn.Module):
def __init__(self, block_size):
super(squeezeNew, self).__init__()
self.block_size = block_size
self.block_size_sq = block_size * block_size
def inverse(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, d_height, d_width, d_depth = output.size()
s_depth = int(d_depth / self.block_size_sq)
s_width = int(d_width * self.block_size)
s_height = int(d_height * self.block_size)
t_1 = output.contiguous().view(batch_size, d_height, d_width, self.
block_size_sq, s_depth)
spl = t_1.split(self.block_size, 3)
stack = [t_t.contiguous().view(batch_size, d_height, s_width,
s_depth) for t_t in spl]
output = torch.stack(stack, 0).transpose(0, 1).permute(0, 2, 1, 3, 4
).contiguous().view(batch_size, s_height, s_width, s_depth)
output = output.permute(0, 3, 1, 2)
return output.contiguous()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Schwartz-Zha/My-invertible-resnet | squeeze | false | 1,027 | [
"MIT"
] | 0 | 5415975bb0d640f3bf3ef4a7b986563e84109270 | https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, block_size):
super().__init__()
self.block_size = block_size
self.block_size_sq = block_size * block_size
def inverse(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, d_height, d_width, d_depth = output.size()
s_depth = int(d_depth / self.block_size_sq)
s_width = int(d_width * self.block_size)
s_height = int(d_height * self.block_size)
t_1 = output.contiguous().view(batch_size, d_height, d_width, self.
block_size_sq, s_depth)
spl = t_1.split(self.block_size, 3)
stack = [t_t.contiguous().view(batch_size, d_height, s_width,
s_depth) for t_t in spl]
output = torch.stack(stack, 0).transpose(0, 1).permute(0, 2, 1, 3, 4
).contiguous().view(batch_size, s_height, s_width, s_depth)
output = output.permute(0, 3, 1, 2)
return output.contiguous()
def forward(self, input):
output = input.permute(0, 2, 3, 1)
batch_size, s_height, _s_width, s_depth = output.size()
d_depth = s_depth * self.block_size_sq
d_height = int(s_height / self.block_size)
t_1 = output.split(self.block_size, 2)
stack = [t_t.contiguous().view(batch_size, d_height, d_depth) for
t_t in t_1]
output = torch.stack(stack, 1)
output = output.permute(0, 2, 1, 3)
output = output.permute(0, 3, 1, 2)
return output.contiguous()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MaxMinGroup | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/6p/c6p6sxn6eyjkfrrvgdkwlhviqvdquvcktva3443bsgc22ofn2dk7.py
# Topologically Sorted Source Nodes: [maxmin], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# maxmin => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %getitem_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 8
x0 = xindex % 4
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x0) + (16*x1) + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x1) + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x1) + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x1) + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 8, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr0 + ((4*x0) + (16*((-4) + x1)) + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr0 + (1 + (4*x0) + (16*((-4) + x1)) + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = triton_helpers.minimum(tmp17, tmp18)
tmp20 = tl.load(in_ptr0 + (2 + (4*x0) + (16*((-4) + x1)) + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (3 + (4*x0) + (16*((-4) + x1)) + (64*x2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.minimum(tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + (x3), tmp26, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 1), (32, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [maxmin], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def process_maxmin_groupsize(x, group_size, axis=-1):
size = list(x.size())
num_channels = size[axis]
if num_channels % group_size:
raise ValueError(
'number of features({}) is not a multiple of group_size({})'.
            format(num_channels, group_size))
size[axis] = -1
if axis == -1:
size += [group_size]
else:
size.insert(axis + 1, group_size)
return size
def maxout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.max(x.view(*size), sort_dim)[0]
def minout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.min(x.view(*size), sort_dim)[0]
class MaxMinGroup(nn.Module):
def __init__(self, group_size, axis=-1):
super(MaxMinGroup, self).__init__()
self.group_size = group_size
self.axis = axis
def forward(self, x):
maxes = maxout_by_group(x, self.group_size, self.axis)
mins = minout_by_group(x, self.group_size, self.axis)
maxmin = torch.cat((maxes, mins), dim=1)
return maxmin
def extra_repr(self):
return 'group_size: {}'.format(self.group_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'group_size': 4}]
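# Editor's sketch (not in the original repo): quick shape check for the default
# init above; the grouped max/min reduce the last dim and the cat doubles dim 1.
if __name__ == '__main__':
    m = MaxMinGroup(group_size=4)
    y = m(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 8, 4, 1)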
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x1 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x1 + 64 * x2), tmp4 &
xmask, eviction_policy='evict_last', other=0.0)
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp17 = tl.load(in_ptr0 + (4 * x0 + 16 * (-4 + x1) + 64 * x2), tmp14 &
xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * (-4 + x1) + 64 * x2),
tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp19 = triton_helpers.minimum(tmp17, tmp18)
tmp20 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * (-4 + x1) + 64 * x2),
tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.minimum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * (-4 + x1) + 64 * x2),
tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.minimum(tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp14, tmp23, tmp24)
tmp26 = tl.where(tmp4, tmp13, tmp25)
tl.store(out_ptr0 + x3, tmp26, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 1), (32, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
def process_maxmin_groupsize(x, group_size, axis=-1):
size = list(x.size())
num_channels = size[axis]
if num_channels % group_size:
raise ValueError(
'number of features({}) is not a multiple of group_size({})'.
            format(num_channels, group_size))
size[axis] = -1
if axis == -1:
size += [group_size]
else:
size.insert(axis + 1, group_size)
return size
def maxout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.max(x.view(*size), sort_dim)[0]
def minout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.min(x.view(*size), sort_dim)[0]
class MaxMinGroupNew(nn.Module):
def __init__(self, group_size, axis=-1):
super(MaxMinGroupNew, self).__init__()
self.group_size = group_size
self.axis = axis
def extra_repr(self):
return 'group_size: {}'.format(self.group_size)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
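# Editor's note: the compiled path above fuses the grouped max, the grouped min,
# and the cat into the single triton_poi_fused_cat_0 kernel, which writes the
# (4, 8, 4, 1) result buffer directly.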
| Schwartz-Zha/My-invertible-resnet | MaxMinGroup | false | 1,028 | [
"MIT"
] | 0 | 5415975bb0d640f3bf3ef4a7b986563e84109270 | https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270 | import torch
import torch.nn as nn
def process_maxmin_groupsize(x, group_size, axis=-1):
size = list(x.size())
num_channels = size[axis]
if num_channels % group_size:
raise ValueError(
'number of features({}) is not a multiple of group_size({})'.
            format(num_channels, group_size))
size[axis] = -1
if axis == -1:
size += [group_size]
else:
size.insert(axis + 1, group_size)
return size
def maxout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.max(x.view(*size), sort_dim)[0]
def minout_by_group(x, group_size, axis=-1):
size = process_maxmin_groupsize(x, group_size, axis)
sort_dim = axis if axis == -1 else axis + 1
return torch.min(x.view(*size), sort_dim)[0]
class Model(nn.Module):
def __init__(self, group_size, axis=-1):
super().__init__()
self.group_size = group_size
self.axis = axis
def forward(self, x):
maxes = maxout_by_group(x, self.group_size, self.axis)
mins = minout_by_group(x, self.group_size, self.axis)
maxmin = torch.cat((maxes, mins), dim=1)
return maxmin
def extra_repr(self):
return 'group_size: {}'.format(self.group_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Split | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/cp/ccplpqa4hjz3n3j5jujs47gyz26ndm5naxvwku5nkktkkpfuh4um.py
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ie/cie6ockbfnni22s6gh6b3qk2guxsbx7g4f3xzhpygcbbqvw47fbq.py
# Topologically Sorted Source Nodes: [x2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x2 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%slice_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
buf1 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x2], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(arg0_1, buf1, 128, grid=grid(128), stream=stream0)
del arg0_1
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Split(nn.Module):
def __init__(self):
super(Split, self).__init__()
def forward(self, x):
n = int(x.size(1) / 2)
x1 = x[:, :n, :, :].contiguous()
x2 = x[:, n:, :, :].contiguous()
return x1, x2
def inverse(self, x1, x2):
return torch.cat((x1, x2), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
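# Editor's sketch (not in the original repo): forward/inverse round-trip check,
# assuming an even channel count.
if __name__ == '__main__':
    s = Split()
    x = torch.rand(4, 4, 4, 4)
    x1, x2 = s(x)
    assert x1.shape == x2.shape == (4, 2, 4, 4)
    assert torch.equal(s.inverse(x1, x2), x)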
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(128)](arg0_1, buf1, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0, buf1
class SplitNew(nn.Module):
def __init__(self):
super(SplitNew, self).__init__()
def inverse(self, x1, x2):
return torch.cat((x1, x2), 1)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
| Schwartz-Zha/My-invertible-resnet | Split | false | 1,029 | [
"MIT"
] | 0 | 5415975bb0d640f3bf3ef4a7b986563e84109270 | https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
n = int(x.size(1) / 2)
x1 = x[:, :n, :, :].contiguous()
x2 = x[:, n:, :, :].contiguous()
return x1, x2
def inverse(self, x1, x2):
return torch.cat((x1, x2), 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
NormalAttention_gaussian | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/qk/cqkamalvxbuxnfcdqxwqnh7w6tog7uhcqbe6niuydyowvdvbnfoc.py
# Topologically Sorted Source Nodes: [energy_1, energy_sum, energy_2], Original ATen: [aten.exp, aten.sum, aten.div]
# Source node to ATen node mapping:
# energy_1 => exp
# energy_2 => div
# energy_sum => sum_1
# Graph fragment:
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%bmm,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused_div_exp_sum_0 = async_compile.triton('triton_per_fused_div_exp_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_exp_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_exp_sum_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl_math.exp(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = tmp1 / tmp5
tl.store(out_ptr1 + (r1 + (16*x0)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), out=buf0)
buf2 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [energy_1, energy_sum, energy_2], Original ATen: [aten.exp, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_div_exp_sum_0.run(buf0, buf2, 64, 16, grid=grid(64), stream=stream0)
del buf0
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf4, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
buf5 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1), 0), buf2, out=buf5)
del buf4
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf7, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf7, primals_1, primals_2, primals_4, reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf2, (4, 16, 16), (256, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class NormalAttention_gaussian(nn.Module):
def __init__(self, input_channel_num):
super(NormalAttention_gaussian, self).__init__()
self.c_in = input_channel_num
self.value_conv = nn.Conv2d(in_channels=self.c_in, out_channels=
self.c_in, kernel_size=1)
self.gamma = nn.Conv2d(in_channels=self.c_in, out_channels=self.
c_in, kernel_size=1)
def forward(self, x):
B, C, H, W = x.size()
proj_query = x.view(B, -1, H * W).permute(0, 2, 1)
proj_key = x.view(B, -1, H * W)
energy = torch.bmm(proj_query, proj_key)
energy = torch.exp(energy)
energy_sum = torch.sum(energy, dim=2, keepdim=True)
energy = energy / energy_sum
proj_value = self.value_conv(x).view(B, -1, H * W)
out = torch.bmm(proj_value, energy).view(B, C, H, W)
out = self.gamma(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channel_num': 4}]
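# Editor's note (assumption, not from the repo): the exp/sum normalization in
# forward() equals softmax along dim=2 but skips the usual max-subtraction, so
# it can overflow for large dot products. A quick equivalence check:
if __name__ == '__main__':
    e = torch.bmm(torch.rand(4, 16, 4), torch.rand(4, 4, 16))
    manual = torch.exp(e) / torch.exp(e).sum(dim=2, keepdim=True)
    assert torch.allclose(manual, torch.softmax(e, dim=2))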
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_div_exp_sum_0(in_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl_math.exp(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = tmp1 / tmp5
tl.store(out_ptr1 + (r1 + 16 * x0), tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 16, 4), (64, 1,
16), 0), reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1),
0), out=buf0)
buf2 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_exp_sum_0[grid(64)](buf0, buf2, 64, 16, XBLOCK
=8, num_warps=2, num_stages=1)
del buf0
buf3 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_1[grid(256)](buf4, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf5 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1),
0), buf2, out=buf5)
del buf4
buf6 = extern_kernels.convolution(reinterpret_tensor(buf5, (4, 4, 4,
4), (64, 16, 4, 1), 0), primals_4, stride=(1, 1), padding=(0, 0
), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_1[grid(256)](buf7, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf7, primals_1, primals_2, primals_4, reinterpret_tensor(buf5,
(4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf2, (4, 16,
16), (256, 1, 16), 0)
class NormalAttention_gaussianNew(nn.Module):
def __init__(self, input_channel_num):
super(NormalAttention_gaussianNew, self).__init__()
self.c_in = input_channel_num
self.value_conv = nn.Conv2d(in_channels=self.c_in, out_channels=
self.c_in, kernel_size=1)
self.gamma = nn.Conv2d(in_channels=self.c_in, out_channels=self.
c_in, kernel_size=1)
def forward(self, input_0):
primals_2 = self.value_conv.weight
primals_3 = self.value_conv.bias
primals_4 = self.gamma.weight
primals_5 = self.gamma.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Schwartz-Zha/My-invertible-resnet | NormalAttention_gaussian | false | 1,030 | [
"MIT"
] | 0 | 5415975bb0d640f3bf3ef4a7b986563e84109270 | https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_channel_num):
super().__init__()
self.c_in = input_channel_num
self.value_conv = nn.Conv2d(in_channels=self.c_in, out_channels=
self.c_in, kernel_size=1)
self.gamma = nn.Conv2d(in_channels=self.c_in, out_channels=self.
c_in, kernel_size=1)
def forward(self, x):
B, C, H, W = x.size()
proj_query = x.view(B, -1, H * W).permute(0, 2, 1)
proj_key = x.view(B, -1, H * W)
energy = torch.bmm(proj_query, proj_key)
energy = torch.exp(energy)
energy_sum = torch.sum(energy, dim=2, keepdim=True)
energy = energy / energy_sum
proj_value = self.value_conv(x).view(B, -1, H * W)
out = torch.bmm(proj_value, energy).view(B, C, H, W)
out = self.gamma(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Conv2dZeroInit | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/b5/cb5h536lfxehnv2ezobudfl5wugu2y6mu444yw7yei4n22rp33zu.py
# Topologically Sorted Source Nodes: [out, mul, exp, mul_1], Original ATen: [aten.convolution, aten.mul, aten.exp]
# Source node to ATen node mapping:
# exp => exp
# mul => mul
# mul_1 => mul_1
# out => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 3.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %exp), kwargs = {})
triton_poi_fused_convolution_exp_mul_0 = async_compile.triton('triton_poi_fused_convolution_exp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_exp_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 3.0
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out, mul, exp, mul_1], Original ATen: [aten.convolution, aten.mul, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_exp_mul_0.run(buf1, primals_2, primals_4, buf2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf2, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Conv2dZeroInit(nn.Conv2d):
def __init__(self, channels_in, channels_out, filter_size, stride=1,
padding=0, logscale=3.0):
super().__init__(channels_in, channels_out, filter_size, stride=
stride, padding=padding)
self.register_parameter('logs', nn.Parameter(torch.zeros(
channels_out, 1, 1)))
self.logscale_factor = logscale
def reset_parameters(self):
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
out = super().forward(input)
return out * torch.exp(self.logs * self.logscale_factor)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels_in': 4, 'channels_out': 4, 'filter_size': 4}]
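# Editor's sketch (not in the original repo): with zero-initialized weight and
# bias the block starts as the zero map, and a 4x4 kernel on a 4x4 input with
# no padding collapses the spatial dims to 1x1.
if __name__ == '__main__':
    m = Conv2dZeroInit(4, 4, 4)
    y = m(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 1, 1) and bool(torch.all(y == 0))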
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_exp_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = 3.0
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp2 * tmp6
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_exp_mul_0[grid(16)](buf1, primals_2,
primals_4, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf2, primals_1, primals_3, primals_4, buf1
class Conv2dZeroInitNew(nn.Conv2d):
def __init__(self, channels_in, channels_out, filter_size, stride=1,
padding=0, logscale=3.0):
super().__init__(channels_in, channels_out, filter_size, stride=
stride, padding=padding)
self.register_parameter('logs', nn.Parameter(torch.zeros(
channels_out, 1, 1)))
self.logscale_factor = logscale
def reset_parameters(self):
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_4 = self.logs
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| Schwartz-Zha/My-invertible-resnet | Conv2dZeroInit | false | 1,031 | [
"MIT"
] | 0 | 5415975bb0d640f3bf3ef4a7b986563e84109270 | https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270 | import torch
import torch.nn as nn
class Model(nn.Conv2d):
def __init__(self, channels_in, channels_out, filter_size, stride=1,
padding=0, logscale=3.0):
super().__init__(channels_in, channels_out, filter_size, stride=
stride, padding=padding)
self.register_parameter('logs', nn.Parameter(torch.zeros(
channels_out, 1, 1)))
self.logscale_factor = logscale
def reset_parameters(self):
self.weight.data.zero_()
self.bias.data.zero_()
def forward(self, input):
out = super().forward(input)
return out * torch.exp(self.logs * self.logscale_factor)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/zv/czvfpj3ah2lefbwpcuw4esv23bxs5a3ab63ply3ntgbsdktepd5v.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 784) % 6
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
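# Editor's note: inductor keeps the convolution itself as an extern kernel call
# and fuses only the bias-add and ReLU into this elementwise kernel, applied in
# place on the convolution output.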
# kernel path: runs/run_shard_6/inductor_cache/v7/cv7qi7gg3bpfwb3hj7zgy5jlgh7x7wdgqsfsodkjsoverxdjlf6z.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = (xindex // 14)
x2 = (xindex // 1176)
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + (1184*x2)), tmp6, xmask)
tl.store(out_ptr1 + (x4 + (1280*x2)), tmp16, xmask)
''', device_str='cuda')
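# Editor's note (reading of the kernel above): out_ptr1 records which of the
# four elements in each 2x2 window won the max, as an int8 offset in 0..3,
# which the max-pool backward pass consumes.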
# kernel path: runs/run_shard_6/inductor_cache/xe/cxelxvpw3asckozc53rh36773aohp5hqpbp2nos5ymcdqhxvo4bl.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 100) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/tn/ctnw4tbgfy47ppke77vu7rtiz7dl5o3ahickx4p64n7c5rmrrix6.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
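# For reference, an eager sketch of the pooling above (hypothetical helper, not
# part of the generated module): a 2x2, stride-2 max pool over the (4, 16, 10, 10)
# activations. Note the index encoding differs: F.max_pool2d returns flat
# spatial indices, whereas this kernel stores the window-local offset (int8, 0..3).
def _ref_maxpool2x2(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)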
# kernel path: runs/run_shard_6/inductor_cache/jn/cjnqv3sgcv5x2iz7ij5zdad6ofabcnonrlksgsxu2ob7n274gz6b.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/v4/cv4ko24f26un3axamp426zbnugqu4jhirnvlxjjqhipcugzzgcbk.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120, ), (1, ))
assert_size_stride(primals_8, (4, 120), (120, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (10, 4), (4, 1))
assert_size_stride(primals_11, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 18816, grid=grid(18816), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 4704, grid=grid(4704), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 6400, grid=grid(6400), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1600, grid=grid(1600), stream=stream0)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf9, primals_7, 480, grid=grid(480), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 4), (1, 120), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf11, primals_9, 16, grid=grid(16), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (4, 10), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((120, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 120), (120, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((10, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self, h2):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, h2)
self.fc3 = nn.Linear(h2, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [[], {'h2': 4}]
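# A short smoke test for the module above (hypothetical, not part of the
# original record; assumes the class and helpers defined here are in scope):
def _demo_net():
    net = Net(h2=4)
    x, = get_inputs()
    return net(x)  # logits of shape (4, 10)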
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (4, 120), (120, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (10, 4), (4, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2,
18816, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
buf3, 4704, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0),
reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 4), (1,
120), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(16)](buf11, primals_9, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (4, 10), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11,
primals_10, primals_8, primals_6)
class NetNew(nn.Module):
def __init__(self, h2):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, h2)
self.fc3 = nn.Linear(h2, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| Saran-nns/delve | Net | false | 1,032 | [
"MIT"
] | 0 | 3489d8aa13181b392d3c47a19f9d9a47d87f8790 | https://github.com/Saran-nns/delve/tree/3489d8aa13181b392d3c47a19f9d9a47d87f8790 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, h2):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, h2)
self.fc3 = nn.Linear(h2, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [4]
|
MeanVarFC | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/xl/cxltji5v4pd435j44wezjn5dhexfkx4wrcadp35cffwfzsbaisad.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 8), (8, 1))
assert_size_stride(primals_2, (4, 4, 4, 8), (128, 32, 8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
del primals_2
return (buf0, )
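# The whole graph above reduces to one broadcast add; a minimal eager sketch
# (hypothetical helper, assuming shapes (4, 4, 4, 8) and (1, 8)):
def _ref_meanvar_add(x, param):
    # the (1, 8) parameter broadcasts against the trailing dim of x,
    # matching x0 = xindex % 8 in the kernel above
    return x + param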
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 8), (128, 32, 8, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MeanVarFC(nn.Module):
def __init__(self, input_shape):
super(MeanVarFC, self).__init__()
shape = list(input_shape)
shape[0] = 1
shape[1] *= 2
self.param = nn.Parameter(0.01 * torch.randn(shape))
def forward(self, x):
x = x + self.param
return x
def get_inputs():
return [torch.rand([4, 4, 4, 8])]
def get_init_inputs():
return [[], {'input_shape': [4, 4]}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 8), (8, 1))
assert_size_stride(primals_2, (4, 4, 4, 8), (128, 32, 8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(512)](primals_2, primals_1, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class MeanVarFCNew(nn.Module):
def __init__(self, input_shape):
super(MeanVarFCNew, self).__init__()
shape = list(input_shape)
shape[0] = 1
shape[1] *= 2
self.param = nn.Parameter(0.01 * torch.randn(shape))
def forward(self, input_0):
primals_1 = self.param
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| Schwartz-Zha/My-invertible-resnet | MeanVarFC | false | 1,033 | [
"MIT"
] | 0 | 5415975bb0d640f3bf3ef4a7b986563e84109270 | https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_shape):
super().__init__()
shape = list(input_shape)
shape[0] = 1
shape[1] *= 2
self.param = nn.Parameter(0.01 * torch.randn(shape))
def forward(self, x):
x = x + self.param
return x
def get_inputs():
return [torch.rand([4, 4, 4, 8])]
def get_init_inputs():
    return [[4, 4]]
|
injective_pad | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ct/cct5cn6mo6zny2z4rzeplk2fltbb4cibi5gdqoa7cpdxa6kif7lx.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x_1 => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%permute, [0, 0, 0, 4], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 8
x0 = xindex % 4
x2 = (xindex // 32) % 4
x3 = (xindex // 128)
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + (x4), tmp3, xmask)
''', device_str='cuda')
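# The permute(0, 2, 1, 3) before and after the ZeroPad2d means the zeros are
# appended along the channel dimension. A minimal eager sketch of the same
# effect (hypothetical helper, not emitted by Inductor):
def _ref_injective_pad(x, pad_size=4):
    import torch.nn.functional as F
    # F.pad pads trailing dims first: (W, W, H, H, C, C) -> append zero channels
    return F.pad(x, (0, 0, 0, 0, 0, pad_size))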
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 8, 4, 4), (128, 4, 32, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class injective_pad(nn.Module):
def __init__(self, pad_size):
super(injective_pad, self).__init__()
self.pad_size = pad_size
self.pad = nn.ZeroPad2d((0, 0, 0, pad_size))
def forward(self, x):
x = x.permute(0, 2, 1, 3)
x = self.pad(x)
return x.permute(0, 2, 1, 3)
def inverse(self, x):
return x[:, :x.size(1) - self.pad_size, :, :]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'pad_size': 4}]
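# Hypothetical round-trip check for the module above (not part of the original
# record): inverse() slices off exactly the channels that forward() appends.
def _demo_injective_pad_roundtrip():
    m = injective_pad(pad_size=4)
    x, = get_inputs()
    assert torch.equal(m.inverse(m(x)), x)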
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 8
x0 = xindex % 4
x2 = xindex // 32 % 4
x3 = xindex // 128
x4 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
    # rows 0..3 copy the input; rows 4..7 (the zero padding) fall back to other=0.0
    tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp2 &
xmask, other=0.0)
tl.store(out_ptr0 + x4, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 4), (128, 32, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(512)](arg0_1, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 8, 4, 4), (128, 4, 32, 1), 0),
class injective_padNew(nn.Module):
def __init__(self, pad_size):
super(injective_padNew, self).__init__()
self.pad_size = pad_size
self.pad = nn.ZeroPad2d((0, 0, 0, pad_size))
def inverse(self, x):
return x[:, :x.size(1) - self.pad_size, :, :]
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Schwartz-Zha/My-invertible-resnet | injective_pad | false | 1,034 | [
"MIT"
] | 0 | 5415975bb0d640f3bf3ef4a7b986563e84109270 | https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, pad_size):
super().__init__()
self.pad_size = pad_size
self.pad = nn.ZeroPad2d((0, 0, 0, pad_size))
def forward(self, x):
x = x.permute(0, 2, 1, 3)
x = self.pad(x)
return x.permute(0, 2, 1, 3)
def inverse(self, x):
return x[:, :x.size(1) - self.pad_size, :, :]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
ANNClassifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/eg/ceg5r5lesy4yjcsgwggyxi634wo4m2ibfbfrzibrfm7cykiqvg76.py
# Topologically Sorted Source Nodes: [feature_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# feature_1 => getitem
# Graph fragment:
# %getitem : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
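# This kernel implements the pooling by taking the max of each group of four
# contiguous values, i.e. a row-wise max over the flattened (4, 4) input.
# An illustrative eager equivalent (hypothetical helper):
def _ref_rowwise_max(x):
    return x.view(-1, 4).max(dim=1).values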
# kernel path: runs/run_shard_6/inductor_cache/uk/cuk5lavfoty5guvxyu3dtcj2ziwvajlbsezoxgszx5nevrcmthnr.py
# Topologically Sorted Source Nodes: [padded_x], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# padded_x => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=3] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%getitem, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_1 = async_compile.triton('triton_poi_fused_constant_pad_nd_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 3)
x0 = xindex % 3
x2 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = tmp2 & tmp4
tmp10 = tmp9 & tmp6
tmp11 = tmp10 & tmp8
tmp12 = tl.load(in_ptr0 + ((-1) + x1), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/5x/c5xvls7xnlknpp2gn4xtpmhjw3qey4npwog52sanzzlugzbe3nuh.py
# Topologically Sorted Source Nodes: [k_out_2, out_1], Original ATen: [aten.cat, aten._softmax]
# Source node to ATen node mapping:
# k_out_2 => cat
# out_1 => div, exp, sum_1
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%add, %add_1], 1), kwargs = {})
# %scalar_tensor_default : [num_users=2] = call_function[target=torch.ops.aten.scalar_tensor.default](args = (1,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
# %ge_scalar : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%view_2, 0), kwargs = {})
# %neg_default : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%scalar_tensor_default,), kwargs = {})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ge_scalar, %scalar_tensor_default, %neg_default), kwargs = {})
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %where_self), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_self, %view_2), kwargs = {})
# %mul_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, %mul_tensor_1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_2,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_cat_2 = async_compile.triton('triton_per_fused__softmax_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 80
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
x1 = (xindex // 4)
r5 = rindex
x0 = xindex % 4
r3 = (rindex // 3)
r2 = rindex % 3
x4 = xindex
tmp19 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 10, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (r5 + (3*x0) + (18*x1)), rmask & tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (r3 + (3*x1)), rmask & tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1, 1], 20, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr0 + (180 + r5 + (3*x0) + (18*((-10) + x1))), rmask & tmp10 & xmask, other=0.0)
tmp14 = tl.load(in_ptr2 + (r2 + (3*((-10) + x1))), rmask & tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tmp20 = 0.0
tmp21 = tmp19 >= tmp20
tmp22 = 1.0
tmp23 = -1.0
tmp24 = tl.where(tmp21, tmp22, tmp23)
tmp25 = tmp18 * tmp24
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = tl.where(rmask & xmask, tmp26, float("-inf"))
tmp29 = triton_helpers.max2(tmp28, 1)[:, None]
tmp30 = tmp25 - tmp29
tmp31 = tmp24 * tmp19
tmp32 = tmp30 * tmp31
tmp33 = tl_math.exp(tmp32)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp36 = tl.where(rmask & xmask, tmp34, 0)
tmp37 = tl.sum(tmp36, 1)[:, None]
tmp38 = tmp33 / tmp37
tl.store(out_ptr0 + (r5 + (9*x4)), tmp18, rmask & xmask)
tl.store(out_ptr3 + (r5 + (9*x4)), tmp38, rmask & xmask)
''', device_str='cuda')
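# The tail of the kernel above (tmp26..tmp38) is the numerically stable softmax
# pattern: subtract the row max before exponentiating, then normalize. The
# surrounding where/mul steps are Inductor's sign-preserving rewrite of the
# fused scale and are omitted in this illustrative sketch:
def _ref_stable_softmax(logits, dim=-1):
    z = logits - logits.amax(dim=dim, keepdim=True)  # shift for stability
    e = z.exp()
    return e / e.sum(dim=dim, keepdim=True)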
# kernel path: runs/run_shard_6/inductor_cache/gb/cgbpq6g2egq2fxmxrkb5cl25fh5pxlxvodou3mrtl4672gbnhrm7.py
# Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%unfold_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 720
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 9
x1 = (xindex // 9) % 4
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (3*x1) + (18*x2)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/zc/czcg4krcjbxycgug3jpxbvrq54v7wgydsnqyhluixxyapo3b4ep3.py
# Topologically Sorted Source Nodes: [feature_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# feature_2 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_7,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
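# This kernel fuses the forward ReLU with the boolean mask that
# threshold_backward will consume: True wherever the activation is zero and the
# incoming gradient must be dropped. An illustrative eager pair (hypothetical):
def _ref_relu_with_mask(x):
    y = torch.relu(x)
    return y, y <= 0  # bool mask used to zero gradients in the backward pass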
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (20, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (20, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_4, (20, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (10, 1, 1, 3, 1), (3, 3, 3, 1, 1))
assert_size_stride(primals_6, (10, 1, 1, 1, 3), (3, 3, 3, 3, 1))
assert_size_stride(primals_7, (80, 20), (20, 1))
assert_size_stride(primals_8, (80, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 4, 1), (4, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [feature_1], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(primals_1, buf0, 4, grid=grid(4), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((1, 1, 6, 3), (18, 18, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [padded_x], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_1.run(buf0, buf1, 18, grid=grid(18), stream=stream0)
# Topologically Sorted Source Nodes: [q_out], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 20, 4, 1), (80, 4, 1, 1))
# Topologically Sorted Source Nodes: [k_out], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (1, 20, 6, 3), (360, 18, 3, 1))
# Topologically Sorted Source Nodes: [v_out], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (1, 20, 6, 3), (360, 18, 3, 1))
buf5 = empty_strided_cuda((1, 20, 4, 1, 3, 3), (720, 36, 9, 9, 3, 1), torch.float32)
buf8 = empty_strided_cuda((1, 1, 20, 4, 1, 9), (720, 720, 36, 9, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [k_out_2, out_1], Original ATen: [aten.cat, aten._softmax]
triton_per_fused__softmax_cat_2.run(buf3, primals_5, primals_6, buf2, buf5, buf8, 80, 9, grid=grid(80), stream=stream0)
del buf3
del primals_5
del primals_6
buf9 = empty_strided_cuda((1, 20, 4, 1, 3, 3), (720, 36, 9, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf4, buf9, 720, grid=grid(720), stream=stream0)
del buf4
buf10 = empty_strided_cuda((80, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (80, 1, 9), (9, 9, 1), 0), reinterpret_tensor(buf9, (80, 9, 1), (9, 1, 0), 0), out=buf10)
buf11 = reinterpret_tensor(buf10, (1, 20, 4, 1), (80, 4, 1, 1), 0); del buf10 # reuse
buf17 = empty_strided_cuda((1, 20, 4, 1), (80, 4, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [feature_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_4.run(buf11, buf17, 80, grid=grid(80), stream=stream0)
buf16 = empty_strided_cuda((4, 80), (80, 1), torch.float32)
buf12 = reinterpret_tensor(buf16, (1, 80), (80, 1), 0) # alias
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf11, (1, 20), (0, 1), 0), reinterpret_tensor(primals_7, (20, 80), (1, 20), 0), alpha=1, beta=1, out=buf12)
buf13 = reinterpret_tensor(buf16, (1, 80), (80, 1), 80) # alias
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf11, (1, 20), (0, 1), 20), reinterpret_tensor(primals_7, (20, 80), (1, 20), 0), alpha=1, beta=1, out=buf13)
buf14 = reinterpret_tensor(buf16, (1, 80), (80, 1), 160) # alias
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf11, (1, 20), (0, 1), 40), reinterpret_tensor(primals_7, (20, 80), (1, 20), 0), alpha=1, beta=1, out=buf14)
buf15 = reinterpret_tensor(buf16, (1, 80), (80, 1), 240) # alias
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf11, (1, 20), (0, 1), 60), reinterpret_tensor(primals_7, (20, 80), (1, 20), 0), alpha=1, beta=1, out=buf15)
del primals_8
return (reinterpret_tensor(buf16, (1, 4, 80), (80, 80, 1), 0), primals_2, primals_3, primals_4, buf0, buf1, buf2, buf5, buf8, reinterpret_tensor(buf11, (1, 20), (20, 1), 0), reinterpret_tensor(buf11, (1, 20), (20, 1), 20), reinterpret_tensor(buf11, (1, 20), (20, 1), 40), reinterpret_tensor(buf11, (1, 20), (20, 1), 60), primals_7, buf17, reinterpret_tensor(buf9, (80, 1, 9), (9, 720, 1), 0), )
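# Pipeline summary: max-pool the (4, 4) input along its last dimension, zero-pad,
# run the three 1x1 convolutions (query/key/value), fuse the relative-position
# concatenation with a numerically stable softmax, contract attention weights
# against values via bmm, apply ReLU, then evaluate the dense layer once per
# timestep through four addmm calls that write into a shared output buffer.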
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((20, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((20, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((10, 1, 1, 3, 1), (3, 3, 3, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((10, 1, 1, 1, 3), (3, 3, 3, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((80, 20), (20, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((80, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class AttentionConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1, bias=False):
super(AttentionConv, self).__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.groups = groups
        assert self.out_channels % self.groups == 0, 'out_channels should be divisible by groups. (example: out_channels: 40, groups: 4)'
self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1,
kernel_size, 1), requires_grad=True)
self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1,
kernel_size), requires_grad=True)
        self.key_conv = nn.Conv2d(in_channels, out_channels,
            kernel_size=1, bias=bias)
        self.query_conv = nn.Conv2d(in_channels, out_channels,
            kernel_size=1, bias=bias)
        self.value_conv = nn.Conv2d(in_channels, out_channels,
            kernel_size=1, bias=bias)
self.reset_parameters()
def forward(self, x):
batch, _channels, height, width = x.size()
        padded_x = F.pad(x, [self.padding, self.padding,
            self.padding, self.padding])
q_out = self.query_conv(x)
k_out = self.key_conv(padded_x)
v_out = self.value_conv(padded_x)
k_out = k_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
v_out = v_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
k_out_h, k_out_w = k_out.split(self.out_channels // 2, dim=1)
k_out = torch.cat((k_out_h + self.rel_h, k_out_w + self.rel_w), dim=1)
        k_out = k_out.contiguous().view(batch, self.groups,
            self.out_channels // self.groups, height, width, -1)
        v_out = v_out.contiguous().view(batch, self.groups,
            self.out_channels // self.groups, height, width, -1)
        q_out = q_out.view(batch, self.groups,
            self.out_channels // self.groups, height, width, 1)
out = q_out * k_out
out = F.softmax(out, dim=-1)
out = torch.einsum('bnchwk,bnchwk -> bnchw', out, v_out).view(batch,
-1, height, width)
return out
def reset_parameters(self):
init.kaiming_normal_(self.key_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.value_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.query_conv.weight, mode='fan_out',
nonlinearity='relu')
init.normal_(self.rel_h, 0, 1)
init.normal_(self.rel_w, 0, 1)
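# A minimal usage sketch (hypothetical shapes, not part of the original module):
# with stride 1, kernel_size=3 and padding=1 the spatial size is preserved, and
# the softmax attention runs over the 3 * 3 = 9 unfolded neighborhood positions.
#
#   attn = AttentionConv(in_channels=1, out_channels=20, kernel_size=3, padding=1)
#   out = attn(torch.randn(2, 1, 8, 8))  # -> torch.Size([2, 20, 8, 8])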
class BaseClassifier(nn.Module):
_timestep_dimension = 2
_max_timesteps = 2048
_value_to_pad = 0
@staticmethod
    def pad_variable_timesteps(tensor, timestep_dimension=_timestep_dimension,
            max_timesteps=_max_timesteps, value_to_pad=_value_to_pad):
"""
Pads a variable-length tensor to a fixed length along the specified dimension.
e.g. shape [1, 1, 200, 1280] --> [1, 1, 512, 1280], with dim=2.
timestep_dimension: Dimension to pad along. For shape [a, b, c, d], the respective dims are [0, 1, 2, 3].
max_timesteps: Number of elements to pad until.
value_to_pad: Constant value used for padding.
"""
number_of_timesteps = tensor.size(dim=timestep_dimension)
        assert number_of_timesteps <= max_timesteps, 'Input received that is longer than max_timesteps. Unable to pad.'
number_of_padded_timesteps = max_timesteps - number_of_timesteps
padding = [0, 0] * (len(tensor.shape) - timestep_dimension - 1) + [
0, number_of_padded_timesteps]
return F.pad(tensor, padding, 'constant', value_to_pad)
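# A minimal usage sketch (hypothetical values, mirroring the docstring example):
#
#   x = torch.zeros(1, 1, 200, 1280)
#   padded = BaseClassifier.pad_variable_timesteps(x, max_timesteps=512)
#   padded.shape  # -> torch.Size([1, 1, 512, 1280])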
class ANNClassifier(BaseClassifier):
def __init__(self, input_dim):
super(ANNClassifier, self).__init__()
self._pool_value = 4
self.hidden_dim_1 = hidden_dim_1 = 20
self.linear_input_dim = hidden_dim_1 * input_dim // self._pool_value
self.out_dim = 80
attn_hyperparams = {'kernel_size': 3, 'padding': 1}
self.attn = AttentionConv(1, hidden_dim_1, **attn_hyperparams)
self.dense = nn.Linear(self.linear_input_dim, self.out_dim)
self.pool = nn.MaxPool2d((1, self._pool_value))
def reshape_input(self, feature, group_size):
down_sample_len = feature.size(1) // group_size
feature = feature[:, :down_sample_len * group_size, :]
reshape_feature = feature.reshape(feature.size(0) * down_sample_len,
group_size * feature.size(2))
return reshape_feature
def forward(self, feature):
feature = feature.unsqueeze(dim=0).unsqueeze(dim=1)
feature = self.pool(feature)
feature = self.attn(feature)
feature = F.relu(feature)
feature = feature.transpose(0, 1)
feature = feature.view(-1, self.linear_input_dim)
feature = torch.stack([self.dense(feature[index].unsqueeze(dim=0)) for
index in range(feature.size()[0])])
feature = feature.transpose(0, 1)
return feature
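# Note: forward applies self.dense independently to each pooled timestep row and
# stacks the results, so a (4, 4) input yields an output of shape [1, 4, 80].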
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 18
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3
x0 = xindex % 3
x2 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp5 < tmp7
tmp9 = tmp2 & tmp4
tmp10 = tmp9 & tmp6
tmp11 = tmp10 & tmp8
tmp12 = tl.load(in_ptr0 + (-1 + x1), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_per_fused__softmax_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 80
rnumel = 9
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
x1 = xindex // 4
r5 = rindex
x0 = xindex % 4
r3 = rindex // 3
r2 = rindex % 3
x4 = xindex
tmp19 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp0 = x1
tl.full([1, 1], 0, tl.int64)
tmp3 = tl.full([1, 1], 10, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (r5 + 3 * x0 + 18 * x1), rmask & tmp4 & xmask,
other=0.0)
tmp6 = tl.load(in_ptr1 + (r3 + 3 * x1), rmask & tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1, 1], 20, tl.int64)
tmp13 = tl.load(in_ptr0 + (180 + r5 + 3 * x0 + 18 * (-10 + x1)), rmask &
tmp10 & xmask, other=0.0)
tmp14 = tl.load(in_ptr2 + (r2 + 3 * (-10 + x1)), rmask & tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 + tmp14
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp10, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp9, tmp17)
tmp20 = 0.0
tmp21 = tmp19 >= tmp20
tmp22 = 1.0
tmp23 = -1.0
tmp24 = tl.where(tmp21, tmp22, tmp23)
tmp25 = tmp18 * tmp24
tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp28 = tl.where(rmask & xmask, tmp26, float('-inf'))
tmp29 = triton_helpers.max2(tmp28, 1)[:, None]
tmp30 = tmp25 - tmp29
tmp31 = tmp24 * tmp19
tmp32 = tmp30 * tmp31
tmp33 = tl_math.exp(tmp32)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp36 = tl.where(rmask & xmask, tmp34, 0)
tmp37 = tl.sum(tmp36, 1)[:, None]
tmp38 = tmp33 / tmp37
tl.store(out_ptr0 + (r5 + 9 * x4), tmp18, rmask & xmask)
tl.store(out_ptr3 + (r5 + 9 * x4), tmp38, rmask & xmask)
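# Note: the kernel above fuses the relative-position concatenation
# (k_out_h + rel_h, k_out_w + rel_w) with a numerically stable softmax of q * k:
# q is factored as sign(q) * |q| so the row-max subtraction can be applied before
# multiplying by |q|, keeping the exp() argument in a safe range.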
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 720
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 9
x1 = xindex // 9 % 4
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 3 * x1 + 18 * x2), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (20, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_3, (20, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_4, (20, 1, 1, 1), (1, 1, 1, 1))
assert_size_stride(primals_5, (10, 1, 1, 3, 1), (3, 3, 3, 1, 1))
assert_size_stride(primals_6, (10, 1, 1, 1, 3), (3, 3, 3, 3, 1))
assert_size_stride(primals_7, (80, 20), (20, 1))
assert_size_stride(primals_8, (80,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 1, 4, 1), (4, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(4)](primals_1, buf0,
4, XBLOCK=4, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 1, 6, 3), (18, 18, 3, 1), torch.float32)
triton_poi_fused_constant_pad_nd_1[grid(18)](buf0, buf1, 18, XBLOCK
=32, num_warps=1, num_stages=1)
buf2 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (1, 20, 4, 1), (80, 4, 1, 1))
buf3 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (1, 20, 6, 3), (360, 18, 3, 1))
buf4 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (1, 20, 6, 3), (360, 18, 3, 1))
buf5 = empty_strided_cuda((1, 20, 4, 1, 3, 3), (720, 36, 9, 9, 3, 1
), torch.float32)
buf8 = empty_strided_cuda((1, 1, 20, 4, 1, 9), (720, 720, 36, 9, 9,
1), torch.float32)
triton_per_fused__softmax_cat_2[grid(80)](buf3, primals_5,
primals_6, buf2, buf5, buf8, 80, 9, XBLOCK=1, num_warps=2,
num_stages=1)
del buf3
del primals_5
del primals_6
buf9 = empty_strided_cuda((1, 20, 4, 1, 3, 3), (720, 36, 9, 9, 3, 1
), torch.float32)
triton_poi_fused_clone_3[grid(720)](buf4, buf9, 720, XBLOCK=128,
num_warps=4, num_stages=1)
del buf4
buf10 = empty_strided_cuda((80, 1, 1), (1, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf8, (80, 1, 9), (9, 9, 1),
0), reinterpret_tensor(buf9, (80, 9, 1), (9, 1, 0), 0), out=buf10)
buf11 = reinterpret_tensor(buf10, (1, 20, 4, 1), (80, 4, 1, 1), 0)
del buf10
buf17 = empty_strided_cuda((1, 20, 4, 1), (80, 4, 1, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_4[grid(80)](buf11, buf17,
80, XBLOCK=128, num_warps=4, num_stages=1)
buf16 = empty_strided_cuda((4, 80), (80, 1), torch.float32)
buf12 = reinterpret_tensor(buf16, (1, 80), (80, 1), 0)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf11, (1, 20),
(0, 1), 0), reinterpret_tensor(primals_7, (20, 80), (1, 20), 0),
alpha=1, beta=1, out=buf12)
buf13 = reinterpret_tensor(buf16, (1, 80), (80, 1), 80)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf11, (1, 20),
(0, 1), 20), reinterpret_tensor(primals_7, (20, 80), (1, 20), 0
), alpha=1, beta=1, out=buf13)
buf14 = reinterpret_tensor(buf16, (1, 80), (80, 1), 160)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf11, (1, 20),
(0, 1), 40), reinterpret_tensor(primals_7, (20, 80), (1, 20), 0
), alpha=1, beta=1, out=buf14)
buf15 = reinterpret_tensor(buf16, (1, 80), (80, 1), 240)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf11, (1, 20),
(0, 1), 60), reinterpret_tensor(primals_7, (20, 80), (1, 20), 0
), alpha=1, beta=1, out=buf15)
del primals_8
return (reinterpret_tensor(buf16, (1, 4, 80), (80, 80, 1), 0),
primals_2, primals_3, primals_4, buf0, buf1, buf2, buf5, buf8,
reinterpret_tensor(buf11, (1, 20), (20, 1), 0), reinterpret_tensor(
buf11, (1, 20), (20, 1), 20), reinterpret_tensor(buf11, (1, 20), (
20, 1), 40), reinterpret_tensor(buf11, (1, 20), (20, 1), 60),
primals_7, buf17, reinterpret_tensor(buf9, (80, 1, 9), (9, 720, 1), 0))
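# Note: call() returns the stacked dense output first; the remaining tuple entries
# are weights, intermediate buffers, and the ReLU mask saved for the backward pass.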
class AttentionConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1, bias=False):
super(AttentionConv, self).__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.groups = groups
        assert self.out_channels % self.groups == 0, 'out_channels should be divisible by groups. (example: out_channels: 40, groups: 4)'
self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1,
kernel_size, 1), requires_grad=True)
self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1,
kernel_size), requires_grad=True)
        self.key_conv = nn.Conv2d(in_channels, out_channels,
            kernel_size=1, bias=bias)
        self.query_conv = nn.Conv2d(in_channels, out_channels,
            kernel_size=1, bias=bias)
        self.value_conv = nn.Conv2d(in_channels, out_channels,
            kernel_size=1, bias=bias)
self.reset_parameters()
def forward(self, x):
batch, _channels, height, width = x.size()
        padded_x = F.pad(x, [self.padding, self.padding,
            self.padding, self.padding])
q_out = self.query_conv(x)
k_out = self.key_conv(padded_x)
v_out = self.value_conv(padded_x)
k_out = k_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
v_out = v_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
k_out_h, k_out_w = k_out.split(self.out_channels // 2, dim=1)
k_out = torch.cat((k_out_h + self.rel_h, k_out_w + self.rel_w), dim=1)
        k_out = k_out.contiguous().view(batch, self.groups,
            self.out_channels // self.groups, height, width, -1)
        v_out = v_out.contiguous().view(batch, self.groups,
            self.out_channels // self.groups, height, width, -1)
        q_out = q_out.view(batch, self.groups,
            self.out_channels // self.groups, height, width, 1)
out = q_out * k_out
out = F.softmax(out, dim=-1)
out = torch.einsum('bnchwk,bnchwk -> bnchw', out, v_out).view(batch,
-1, height, width)
return out
def reset_parameters(self):
init.kaiming_normal_(self.key_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.value_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.query_conv.weight, mode='fan_out',
nonlinearity='relu')
init.normal_(self.rel_h, 0, 1)
init.normal_(self.rel_w, 0, 1)
class BaseClassifier(nn.Module):
_timestep_dimension = 2
_max_timesteps = 2048
_value_to_pad = 0
@staticmethod
    def pad_variable_timesteps(tensor, timestep_dimension=_timestep_dimension,
            max_timesteps=_max_timesteps, value_to_pad=_value_to_pad):
"""
Pads a variable-length tensor to a fixed length along the specified dimension.
e.g. shape [1, 1, 200, 1280] --> [1, 1, 512, 1280], with dim=2.
timestep_dimension: Dimension to pad along. For shape [a, b, c, d], the respective dims are [0, 1, 2, 3].
max_timesteps: Number of elements to pad until.
value_to_pad: Constant value used for padding.
"""
number_of_timesteps = tensor.size(dim=timestep_dimension)
        assert number_of_timesteps <= max_timesteps, 'Input received that is longer than max_timesteps. Unable to pad.'
number_of_padded_timesteps = max_timesteps - number_of_timesteps
padding = [0, 0] * (len(tensor.shape) - timestep_dimension - 1) + [
0, number_of_padded_timesteps]
return F.pad(tensor, padding, 'constant', value_to_pad)
class ANNClassifierNew(BaseClassifier):
def __init__(self, input_dim):
super(ANNClassifierNew, self).__init__()
self._pool_value = 4
self.hidden_dim_1 = hidden_dim_1 = 20
self.linear_input_dim = hidden_dim_1 * input_dim // self._pool_value
self.out_dim = 80
attn_hyperparams = {'kernel_size': 3, 'padding': 1}
self.attn = AttentionConv(1, hidden_dim_1, **attn_hyperparams)
self.dense = nn.Linear(self.linear_input_dim, self.out_dim)
self.pool = nn.MaxPool2d((1, self._pool_value))
def reshape_input(self, feature, group_size):
down_sample_len = feature.size(1) // group_size
feature = feature[:, :down_sample_len * group_size, :]
reshape_feature = feature.reshape(feature.size(0) * down_sample_len,
group_size * feature.size(2))
return reshape_feature
def forward(self, input_0):
primals_5 = self.attn.rel_h
primals_6 = self.attn.rel_w
primals_2 = self.attn.key_conv.weight
primals_3 = self.attn.query_conv.weight
primals_4 = self.attn.value_conv.weight
primals_7 = self.dense.weight
primals_8 = self.dense.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
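# Note: output[0] is the stacked dense result of shape [1, 4, 80]; the other
# entries of call()'s tuple are saved tensors for autograd and are discarded.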
| Sam-limyr/End-to-end-ASR-Pytorch | ANNClassifier | false | 1,035 | [
"MIT"
] | 0 | 623a50792f48218228549ea17b8ea5e8bb1b342f | https://github.com/Sam-limyr/End-to-end-ASR-Pytorch/tree/623a50792f48218228549ea17b8ea5e8bb1b342f | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class AttentionConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, groups=1, bias=False):
super().__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.groups = groups
        assert self.out_channels % self.groups == 0, 'out_channels should be divisible by groups. (example: out_channels: 40, groups: 4)'
self.rel_h = nn.Parameter(torch.randn(out_channels // 2, 1, 1,
kernel_size, 1), requires_grad=True)
self.rel_w = nn.Parameter(torch.randn(out_channels // 2, 1, 1, 1,
kernel_size), requires_grad=True)
        self.key_conv = nn.Conv2d(in_channels, out_channels,
            kernel_size=1, bias=bias)
        self.query_conv = nn.Conv2d(in_channels, out_channels,
            kernel_size=1, bias=bias)
        self.value_conv = nn.Conv2d(in_channels, out_channels,
            kernel_size=1, bias=bias)
self.reset_parameters()
def forward(self, x):
batch, _channels, height, width = x.size()
        padded_x = F.pad(x, [self.padding, self.padding,
            self.padding, self.padding])
q_out = self.query_conv(x)
k_out = self.key_conv(padded_x)
v_out = self.value_conv(padded_x)
k_out = k_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
v_out = v_out.unfold(2, self.kernel_size, self.stride).unfold(3,
self.kernel_size, self.stride)
k_out_h, k_out_w = k_out.split(self.out_channels // 2, dim=1)
k_out = torch.cat((k_out_h + self.rel_h, k_out_w + self.rel_w), dim=1)
        k_out = k_out.contiguous().view(batch, self.groups,
            self.out_channels // self.groups, height, width, -1)
        v_out = v_out.contiguous().view(batch, self.groups,
            self.out_channels // self.groups, height, width, -1)
        q_out = q_out.view(batch, self.groups,
            self.out_channels // self.groups, height, width, 1)
out = q_out * k_out
out = F.softmax(out, dim=-1)
out = torch.einsum('bnchwk,bnchwk -> bnchw', out, v_out).view(batch,
-1, height, width)
return out
def reset_parameters(self):
init.kaiming_normal_(self.key_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.value_conv.weight, mode='fan_out',
nonlinearity='relu')
init.kaiming_normal_(self.query_conv.weight, mode='fan_out',
nonlinearity='relu')
init.normal_(self.rel_h, 0, 1)
init.normal_(self.rel_w, 0, 1)
class BaseClassifier(nn.Module):
_timestep_dimension = 2
_max_timesteps = 2048
_value_to_pad = 0
@staticmethod
    def pad_variable_timesteps(tensor, timestep_dimension=_timestep_dimension,
            max_timesteps=_max_timesteps, value_to_pad=_value_to_pad):
"""
Pads a variable-length tensor to a fixed length along the specified dimension.
e.g. shape [1, 1, 200, 1280] --> [1, 1, 512, 1280], with dim=2.
timestep_dimension: Dimension to pad along. For shape [a, b, c, d], the respective dims are [0, 1, 2, 3].
max_timesteps: Number of elements to pad until.
value_to_pad: Constant value used for padding.
"""
number_of_timesteps = tensor.size(dim=timestep_dimension)
        assert number_of_timesteps <= max_timesteps, 'Input received that is longer than max_timesteps. Unable to pad.'
number_of_padded_timesteps = max_timesteps - number_of_timesteps
padding = [0, 0] * (len(tensor.shape) - timestep_dimension - 1) + [
0, number_of_padded_timesteps]
return F.pad(tensor, padding, 'constant', value_to_pad)
class Model(BaseClassifier):
def __init__(self, input_dim):
# ... truncated (>4000 chars) for memory efficiency |
ActNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/nz/cnzkzycmku6msmsiehonl6lvkxkalhor6qvech326gpy2lqc3hoy.py
# Topologically Sorted Source Nodes: [logdet], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# logdet => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%unsqueeze,), kwargs = {})
triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp3, None)
''', device_str='cuda')
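# Note: the persistent reduction above sums the 4-element log-scale vector into a
# single scalar in one launch; this scalar is the logdet term returned by forward.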
# kernel path: runs/run_shard_6/inductor_cache/w6/cw6fwvvd65omur5x74r5pig6wm7q3m5ho7q4pfmcjdleaujxnwkx.py
# Topologically Sorted Source Nodes: [exp, mul, add], Original ATen: [aten.exp, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# exp => exp
# mul => mul
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%unsqueeze,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %exp), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %unsqueeze_1), kwargs = {})
triton_poi_fused_add_exp_mul_1 = async_compile.triton('triton_poi_fused_add_exp_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
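# Note: the kernel above evaluates y = x * exp(log_scale) + shift elementwise,
# broadcasting the 4-element parameter vectors along the fastest-varying index
# (xindex % 4), which mirrors the [None, :] unsqueeze in the eager module.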
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [logdet], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sum_0.run(primals_1, buf0, 1, 4, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp, mul, add], Original ATen: [aten.exp, aten.mul, aten.add]
triton_poi_fused_add_exp_mul_1.run(primals_2, primals_1, primals_3, buf1, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf1, buf0, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import Parameter
class ActNorm(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super(ActNorm, self).__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :]
def shift(self):
return self._shift[None, :]
def forward(self, x):
if not self._init:
with torch.no_grad():
assert self.num_channels == x.size(1)
                mean = torch.transpose(x, 0, 1).contiguous().view(
                    self.num_channels, -1).mean(dim=1)
zero_mean = x - mean[None, :]
var = torch.transpose(zero_mean ** 2, 0, 1).contiguous().view(
self.num_channels, -1).mean(dim=1)
std = (var + self.eps) ** 0.5
log_scale = torch.log(1.0 / std)
self._shift.data = -mean * torch.exp(log_scale)
self._log_scale.data = log_scale
self._init = True
log_scale = self.log_scale()
logdet = log_scale.sum()
return x * torch.exp(log_scale) + self.shift(), logdet
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
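# A minimal round-trip sketch (hypothetical values, not part of the original
# module): the first forward call performs a data-dependent init from the batch
# statistics; afterwards inverse() exactly undoes y = x * exp(log_scale) + shift.
#
#   norm = ActNorm(num_channels=4)
#   y, logdet = norm(torch.rand(4, 4, 4, 4))  # logdet == norm._log_scale.sum()
#   x_rec = norm.inverse(y)                   # x_rec ~= the original input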
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp3, None)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(1)](primals_1, buf0, 1, 4, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_exp_mul_1[grid(256)](primals_2, primals_1,
primals_3, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf1, buf0, primals_1, primals_2
class ActNormNew(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super(ActNormNew, self).__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :]
def shift(self):
return self._shift[None, :]
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def forward(self, input_0):
primals_1 = self._log_scale
primals_3 = self._shift
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
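# Note: the compiled forward returns the affine-transformed tensor together with
# the scalar logdet, matching the (output, logdet) pair of the eager ActNorm.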
| Schwartz-Zha/My-invertible-resnet | ActNorm | false | 1,036 | [
"MIT"
] | 0 | 5415975bb0d640f3bf3ef4a7b986563e84109270 | https://github.com/Schwartz-Zha/My-invertible-resnet/tree/5415975bb0d640f3bf3ef4a7b986563e84109270 | import torch
import torch.nn as nn
from torch.nn import Parameter
class Model(nn.Module):
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.eps = eps
self.num_channels = num_channels
self._log_scale = Parameter(torch.Tensor(num_channels))
self._shift = Parameter(torch.Tensor(num_channels))
self._init = False
def log_scale(self):
return self._log_scale[None, :]
def shift(self):
return self._shift[None, :]
def forward(self, x):
if not self._init:
with torch.no_grad():
assert self.num_channels == x.size(1)
                mean = torch.transpose(x, 0, 1).contiguous().view(
                    self.num_channels, -1).mean(dim=1)
zero_mean = x - mean[None, :]
var = torch.transpose(zero_mean ** 2, 0, 1).contiguous().view(
self.num_channels, -1).mean(dim=1)
std = (var + self.eps) ** 0.5
log_scale = torch.log(1.0 / std)
self._shift.data = -mean * torch.exp(log_scale)
self._log_scale.data = log_scale
self._init = True
log_scale = self.log_scale()
logdet = log_scale.sum()
return x * torch.exp(log_scale) + self.shift(), logdet
def inverse(self, x):
return (x - self.shift()) * torch.exp(-self.log_scale())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|